diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ad189bb59ff..24ede7001b4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -170,10 +170,11 @@ For dependencies, we can use anything compliant with [this list](https://opensou ### Code generation -Code generation for `Ops` and related classes is done during `tensorflow-core-api`'s `compile` phase, using the annotation processor in -`tensorflow-core-generator`. If you change or add any operator classes (annotated with `org.tensorflow.op.annotation.Operator`), endpoint methods ( -annotated with `org.tensorflow.op.annotation.Endpoint`), or change the annotation processor, be sure to re-run a -`mvn install` in `tensorflow-core-api` (`-Pdev` is fine for this, it just needs to run the annotation processor). +Code generation for `Ops` and related classes is done during `tensorflow-core-api` and `tensorflow-core-kotlin`'s `compile` phase, +using the annotation processors in `tensorflow-core-generator` and `tensorflow-kotlin-generator`, respectively. If you change or add any +operator classes (annotated with `org.tensorflow.op.annotation.Operator`), endpoint methods (annotated with `org.tensorflow.op.annotation.Endpoint`), +or change the annotation processor, be sure to re-run a `mvn compile` in `tensorflow-core-api` **and** `tensorflow-core-kotlin` +(`-Pdev` is fine for this, it just needs to run the annotation processor). ### Working with Bazel generation @@ -189,6 +190,19 @@ bazel-out/k8-opt/bin/external/org_tensorflow/tensorflow/libtensorflow_cc.so --ou (called in `tensorflow-core-api`). +### Kotlin API + +The Kotlin API should be kept to a thin wrapper of the Java API, using extension functions and codegen wherever possible. +We do not want to get into a situation where we are maintaining two separate but related APIs. + +The codegen (`tensorflow-core-kotlin-generator`) is an annotation processor that reads the `@Operator` classes from the `tensorflow-core-api` Java sources. 
+If you add operators or re-generate them from the native library, be sure to re-run a `mvn install` in `tensorflow-core-kotlin-api`. + +#### Formatting + +[ktfmt](https://github.com/facebookincubator/ktfmt) is used to format the Kotlin files. This is +checked and done via Maven in the same way as Java formatting. To do the formatting via IntelliJ see +ktfmt's repo. ## Adding Gradients diff --git a/README.md b/README.md index 305fb1e759a..3eab22ea048 100644 --- a/README.md +++ b/README.md @@ -21,8 +21,13 @@ The following describes the layout of the repository and its different artifacts * `tensorflow-core` * All artifacts that build up the core language bindings of TensorFlow for Java * Intended audience: projects that provide their own APIs or frameworks on top of - TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM + TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM +* `tensorflow-core-kotlin` + * Kotlin API bindings for `tensorflow-core`. These are thin wrappers around the core APIs + to make them more idiomatic for use in Kotlin, such as using parameters with default values + for operation builders instead of an `Options` vararg. + * `tensorflow-framework` * Primary API for building and training neural networks with TensorFlow * Intended audience: neural network developers @@ -112,6 +117,12 @@ the platforms you are targeting. For this purpose the `-platform` artifacts incl the conventions established on this page: * [Reducing the Number of Dependencies](https://github.com/bytedeco/javacpp-presets/wiki/Reducing-the-Number-of-Dependencies) +### Kotlin API + +Since the Kotlin API is just a wrapper of the Java API, it uses the Java platform artifacts instead of providing its own. +To use, follow the instructions above for the Java API, but add `tensorflow-core-kotlin-api`, +replacing `tensorflow-core-api` if you have explicitly included it. 
+ ### Snapshots Snapshots of TensorFlow Java artifacts are automatically distributed after each update in the code. To use them, you need diff --git a/pom.xml b/pom.xml index f4f1b18928b..ed123e9228b 100644 --- a/pom.xml +++ b/pom.xml @@ -32,6 +32,7 @@ tensorflow-core + tensorflow-kotlin-parent tensorflow-framework diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index b6f9da1a2bd..142aac1065f 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -20,7 +20,7 @@ ${native.build.skip} ${native.build.skip} org.tensorflow.core.api - 0.3.3 + 0.4.0-SNAPSHOT 1.0.1 diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 223754b0480..f1a3ab1dd79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -340,7 +340,7 @@ * } * } */ -public final class Ops { +public final class Ops implements WithOps { public final NnOps nn; public final SummaryOps summary; @@ -371,10 +371,10 @@ public final class Ops { public final TpuOps tpu; - public final AudioOps audio; - public final MathOps math; + public final AudioOps audio; + public final SignalOps signal; public final TrainOps train; @@ -400,8 +400,8 @@ public final class Ops { sparse = new SparseOps(this); bitwise = new BitwiseOps(this); tpu = new TpuOps(this); - audio = new AudioOps(this); math = new MathOps(this); + audio = new AudioOps(this); signal = new SignalOps(this); train = new TrainOps(this); quantization = new QuantizationOps(this); @@ -8068,11 +8068,15 @@ public ZerosLike zerosLike(Operand x) { return ZerosLike.create(scope, x); } + @Override + public Ops tf() { + return this; + } + /** - * Returns an API that builds operations with the provided name 
prefix. - * - * @see {@link Scope#withSubScope(String)} + * {@inheritDoc} */ + @Override public Ops withSubScope(String childScopeName) { return new Ops(scope.withSubScope(childScopeName)); } @@ -8109,28 +8113,25 @@ public T liftToInitScope(T op) { } /** - * Returns an API that uses the provided name for an op. - * - * @see {@link Scope#withName(String)} + * {@inheritDoc} */ + @Override public Ops withName(String opName) { return new Ops(scope.withName(opName)); } /** - * Returns an API that places the created operations on the device(s) matching the provided spec. - * - * @see {@link Scope#withDevice(DeviceSpec)} + * {@inheritDoc} */ + @Override public Ops withDevice(DeviceSpec deviceSpec) { return new Ops(scope.withDevice(deviceSpec)); } /** - * Returns an API that adds operations to the graph with the provided control dependencies. - * - * @see {@link Scope#withControlDependencies(Iterable>)} + * {@inheritDoc} */ + @Override public Ops withControlDependencies(Iterable controls) { return new Ops(scope.withControlDependencies(controls)); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java index 87745138f01..845efa92fb8 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java @@ -16,10 +16,12 @@ package org.tensorflow; import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; +import org.tensorflow.op.WithOps; /** Defines an environment for creating and executing TensorFlow {@link Operation}s. */ -public interface ExecutionEnvironment { +public interface ExecutionEnvironment extends WithOps { enum Types { GRAPH, @@ -126,4 +128,9 @@ default ExecutionEnvironment initEnv() { *

Should generally only be used internally. */ boolean isInitOp(Operation op); + + @Override + default Ops tf() { + return Ops.create(this); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index 251f5a6e4b3..a171bbe3108 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -1,18 +1,18 @@ /* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+======================================================================= +*/ package org.tensorflow; import java.util.Collections; @@ -161,7 +161,7 @@ private static TensorInfo toTensorInfo(Output operand) { Shape shape = operand.shape(); TensorShapeProto.Builder tensorShapeBuilder = TensorShapeProto.newBuilder(); for (int i = 0; i < shape.numDimensions(); ++i) { - tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.size(i))); + tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.get(i))); } return TensorInfo.newBuilder() .setDtype(operand.dataType()) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java new file mode 100644 index 00000000000..474127b4ca1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java @@ -0,0 +1,73 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.op; + +import java.util.Arrays; +import org.tensorflow.DeviceSpec; + +/** A context that provides a TensorFlow op builder. */ +public interface WithOps { + + /** Get the op builder for this context. */ + Ops tf(); + + /** + * Returns an API that builds operations with the provided name prefix. 
+ * + * @see Scope#withSubScope(String) + */ + default WithOps withSubScope(String childScopeName) { + return tf().withSubScope(childScopeName); + } + + /** + * Returns an API that uses the provided name for an op. + * + * @see Scope#withName(String) + */ + default WithOps withName(String opName) { + return tf().withName(opName); + } + + /** + * Returns an API that places the created operations on the device(s) matching the provided spec. + * + * @see Scope#withDevice(DeviceSpec) + */ + default WithOps withDevice(DeviceSpec deviceSpec) { + return tf().withDevice(deviceSpec); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + * @see Scope#withControlDependencies(Iterable) + */ + default WithOps withControlDependencies(Iterable controls) { + return tf().withControlDependencies(controls); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + * @see Scope#withControlDependencies(Iterable) + */ + default WithOps withControlDependencies(Op... 
controls) { + return withControlDependencies(Arrays.asList(controls)); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java index e2dc82f4c48..fbed7861ed9 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java @@ -55,8 +55,8 @@ public void outputDataTypeAndShape() { .setAttr("value", t) .build(); assertEquals(DataType.DT_INT32, op.dtype(0)); - assertEquals(2, op.shape(0).size(0)); - assertEquals(3, op.shape(0).size(1)); + assertEquals(2, op.shape(0).get(0)); + assertEquals(3, op.shape(0).get(1)); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java index 84e1e56df56..5b9b8d059da 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java @@ -144,8 +144,8 @@ public void setAttrShape() { .build() .output(0); assertEquals(2, n.shape().numDimensions()); - assertEquals(-1, n.shape().size(0)); - assertEquals(784, n.shape().size(1)); + assertEquals(-1, n.shape().get(0)); + assertEquals(784, n.shape().get(1)); assertEquals(DataType.DT_FLOAT, n.dataType()); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java index be6f952fb6a..8e3f742b6bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java @@ 
-200,7 +200,7 @@ public void exportFunctionWithVariables() throws IOException { assertNotNull(inputInfo); assertEquals(xyShape.numDimensions(), inputInfo.getTensorShape().getDimCount()); for (int i = 0; i < xyShape.numDimensions(); ++i) { - assertEquals(xyShape.size(i), inputInfo.getTensorShape().getDim(i).getSize()); + assertEquals(xyShape.get(i), inputInfo.getTensorShape().getDim(i).getSize()); } TensorInfo outputInfo = signatureDef.getOutputsMap().get("reducedSum"); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java index 9415a986222..0d3015d0445 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java @@ -66,7 +66,7 @@ public void createWithRawData() { Shape strings_shape = Shape.scalar(); byte[] strings_; // raw TF_STRING try (TString t = TString.tensorOf(NdArrays.scalarOfObject(strings))) { - strings_ = new byte[(int)t.numBytes()]; + strings_ = new byte[(int) t.numBytes()]; t.asRawTensor().data().read(strings_); } @@ -86,8 +86,11 @@ public void createWithRawData() { // validate creating a tensor using a direct byte buffer (in host order) { - DoubleBuffer buf = ByteBuffer.allocateDirect(8 * doubles.length).order(ByteOrder.nativeOrder()) - .asDoubleBuffer().put(doubles); + DoubleBuffer buf = + ByteBuffer.allocateDirect(8 * doubles.length) + .order(ByteOrder.nativeOrder()) + .asDoubleBuffer() + .put(doubles); try (TFloat64 t = TFloat64.tensorOf(doubles_shape, d -> d.write(DataBuffers.of(buf)))) { double[] actual = new double[doubles.length]; t.read(DataBuffers.of(actual)); @@ -140,10 +143,10 @@ public void createFromBufferWithNonNativeByteOrder() { @Test public void createWithTypedBuffer() { - IntBuffer ints = IntBuffer.wrap(new int[]{1, 2, 3, 4}); - FloatBuffer floats = FloatBuffer.wrap(new float[]{1f, 2f, 3f, 4f}); 
- DoubleBuffer doubles = DoubleBuffer.wrap(new double[]{1d, 2d, 3d, 4d}); - LongBuffer longs = LongBuffer.wrap(new long[]{1L, 2L, 3L, 4L}); + IntBuffer ints = IntBuffer.wrap(new int[] {1, 2, 3, 4}); + FloatBuffer floats = FloatBuffer.wrap(new float[] {1f, 2f, 3f, 4f}); + DoubleBuffer doubles = DoubleBuffer.wrap(new double[] {1d, 2d, 3d, 4d}); + LongBuffer longs = LongBuffer.wrap(new long[] {1L, 2L, 3L, 4L}); // validate creating a tensor using a typed buffer { @@ -243,7 +246,7 @@ public void readFromRawData() { // validate the use of direct buffers { ByteBuffer bbuf = - ByteBuffer.allocateDirect((int)tdoubles.numBytes()).order(ByteOrder.nativeOrder()); + ByteBuffer.allocateDirect((int) tdoubles.numBytes()).order(ByteOrder.nativeOrder()); tdoubles.asRawTensor().data().copyTo(DataBuffers.of(bbuf), tdoubles.numBytes()); assertEquals(doubles[0], bbuf.asDoubleBuffer().get(0), EPSILON); } @@ -251,13 +254,17 @@ public void readFromRawData() { // validate byte order conversion { DoubleBuffer foreignBuf = - ByteBuffer.allocate((int)tdoubles.numBytes()) + ByteBuffer.allocate((int) tdoubles.numBytes()) .order( ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN) .asDoubleBuffer(); - tdoubles.asRawTensor().data().asDoubles().copyTo(DataBuffers.of(foreignBuf), foreignBuf.capacity()); + tdoubles + .asRawTensor() + .data() + .asDoubles() + .copyTo(DataBuffers.of(foreignBuf), foreignBuf.capacity()); double[] actual = new double[foreignBuf.remaining()]; foreignBuf.get(actual); assertArrayEquals(doubles, actual, EPSILON); @@ -320,51 +327,55 @@ public void scalars() { @Test public void nDimensional() { - DoubleNdArray vector = StdArrays.ndCopyOf(new double[]{1.414, 2.718, 3.1415}); + DoubleNdArray vector = StdArrays.ndCopyOf(new double[] {1.414, 2.718, 3.1415}); try (TFloat64 t = TFloat64.tensorOf(vector)) { assertEquals(TFloat64.class, t.type()); assertEquals(DataType.DT_DOUBLE, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(3, t.shape().size(0)); + assertEquals(3, t.shape().get(0)); assertEquals(vector, t); } - IntNdArray matrix = StdArrays.ndCopyOf(new int[][]{{1, 2, 3}, {4, 5, 6}}); + IntNdArray matrix = StdArrays.ndCopyOf(new int[][] {{1, 2, 3}, {4, 5, 6}}); try (TInt32 t = TInt32.tensorOf(matrix)) { assertEquals(TInt32.class, t.type()); assertEquals(DataType.DT_INT32, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(2, t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(2, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(matrix, t); } - LongNdArray threeD = StdArrays.ndCopyOf(new long[][][]{ - {{1}, {3}, {5}, {7}, {9}}, {{2}, {4}, {6}, {8}, {0}}, - }); + LongNdArray threeD = + StdArrays.ndCopyOf( + new long[][][] { + {{1}, {3}, {5}, {7}, {9}}, {{2}, {4}, {6}, {8}, {0}}, + }); try (TInt64 t = TInt64.tensorOf(threeD)) { assertEquals(TInt64.class, t.type()); assertEquals(DataType.DT_INT64, t.dataType()); assertEquals(3, t.shape().numDimensions()); - assertEquals(2, t.shape().size(0)); - assertEquals(5, t.shape().size(1)); - assertEquals(1, t.shape().size(2)); + assertEquals(2, 
t.shape().get(0)); + assertEquals(5, t.shape().get(1)); + assertEquals(1, t.shape().get(2)); assertEquals(threeD, t); } - BooleanNdArray fourD = StdArrays.ndCopyOf(new boolean[][][][]{ - {{{false, false, false, true}, {false, false, true, false}}}, - {{{false, false, true, true}, {false, true, false, false}}}, - {{{false, true, false, true}, {false, true, true, false}}}, - }); + BooleanNdArray fourD = + StdArrays.ndCopyOf( + new boolean[][][][] { + {{{false, false, false, true}, {false, false, true, false}}}, + {{{false, false, true, true}, {false, true, false, false}}}, + {{{false, true, false, true}, {false, true, true, false}}}, + }); try (TBool t = TBool.tensorOf(fourD)) { assertEquals(TBool.class, t.type()); assertEquals(DataType.DT_BOOL, t.dataType()); assertEquals(4, t.shape().numDimensions()); - assertEquals(3, t.shape().size(0)); - assertEquals(1, t.shape().size(1)); - assertEquals(2, t.shape().size(2)); - assertEquals(4, t.shape().size(3)); + assertEquals(3, t.shape().get(0)); + assertEquals(1, t.shape().get(1)); + assertEquals(2, t.shape().get(2)); + assertEquals(4, t.shape().get(3)); assertEquals(fourD, t); } } @@ -381,19 +392,21 @@ public void testNDimensionalStringTensor() { assertEquals(TString.class, t.type()); assertEquals(DataType.DT_STRING, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(4, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(matrix, t); } NdArray byteMatrix = NdArrays.ofObjects(byte[].class, matrix.shape()); - matrix.scalars().forEachIndexed((i, s) -> byteMatrix.setObject(s.getObject().getBytes(UTF_8), i)); + matrix + .scalars() + .forEachIndexed((i, s) -> byteMatrix.setObject(s.getObject().getBytes(UTF_8), i)); try (TString t = TString.tensorOfBytes(byteMatrix)) { assertEquals(TString.class, t.type()); assertEquals(DataType.DT_STRING, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(4, 
t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(4, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(byteMatrix, t.asBytes()); assertEquals(matrix, t); } @@ -406,7 +419,7 @@ public void testUint8TensorFromArray() { assertEquals(TUint8.class, t.type()); assertEquals(DataType.DT_UINT8, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); + assertEquals(4, t.shape().get(0)); byte[] got = new byte[4]; t.read(DataBuffers.of(got)); @@ -421,7 +434,7 @@ public void testCreateFromArrayOfBoxed() { assertEquals(TInt32.class, t.type()); assertEquals(DataType.DT_INT32, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); + assertEquals(4, t.shape().get(0)); Integer[] got = new Integer[4]; t.read(DataBuffers.ofObjects(got)); @@ -512,9 +525,10 @@ public void fromHandle() { // // An exception is made for this test, where the pitfalls of this is avoided by not calling // close() on both Tensors. 
- final FloatNdArray matrix = StdArrays.ndCopyOf(new float[][]{{1, 2, 3}, {4, 5, 6}}); + final FloatNdArray matrix = StdArrays.ndCopyOf(new float[][] {{1, 2, 3}, {4, 5, 6}}); try (TFloat32 src = TFloat32.tensorOf(matrix)) { - TFloat32 cpy = (TFloat32)RawTensor.fromHandle(src.asRawTensor().nativeHandle()).asTypedTensor(); + TFloat32 cpy = + (TFloat32) RawTensor.fromHandle(src.asRawTensor().nativeHandle()).asTypedTensor(); assertEquals(src.type(), cpy.type()); assertEquals(src.dataType(), cpy.dataType()); assertEquals(src.shape().numDimensions(), cpy.shape().numDimensions()); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java index 7252d258814..958b74de1bf 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java @@ -65,6 +65,8 @@ public class Names { public static final TypeName ArrayOp = ArrayTypeName.of(Op); public static final TypeName ArrayOperation = ArrayTypeName.of(Operation); + public static final ClassName WithOps = ClassName.get(OpPackage, "WithOps"); + public static final ClassName Operand = ClassName.get(TensorflowPackage, "Operand"); public static final ClassName Output = ClassName.get(TensorflowPackage, "Output"); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java new file mode 100644 index 00000000000..793a7aa7b57 --- /dev/null +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -0,0 +1,557 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +package org.tensorflow.processor.operator; + +import com.github.javaparser.ast.comments.JavadocComment; +import com.github.javaparser.javadoc.Javadoc; +import com.google.common.base.CaseFormat; +import com.google.common.base.Strings; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterSpec; +import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; +import com.squareup.javapoet.TypeVariableName; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.Filer; +import javax.annotation.processing.Messager; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.AnnotationValue; +import 
javax.lang.model.element.Element; +import javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.Name; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.TypeParameterElement; +import javax.lang.model.element.VariableElement; +import javax.lang.model.type.NoType; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.TypeVariable; +import javax.lang.model.util.ElementFilter; +import javax.lang.model.util.Elements; +import javax.lang.model.util.Types; +import javax.tools.Diagnostic.Kind; + +/** + * A compile-time Processor that aggregates classes annotated with {@code + * org.tensorflow.op.annotation.Operator} and generates the {@code Ops} convenience API. Please + * refer to the {@code Operator} annotation for details about the API generated for each annotated + * class. + * + *

Note that this processor can only be invoked once, in a single compilation run that includes + * all the {@code Operator} annotated source classes. The reason is that the {@code Ops} API is an + * "aggregating" API, and annotation processing does not permit modifying an already generated + * class. + */ +public abstract class BaseOperatorProcessor extends AbstractProcessor { + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + + @Override + public synchronized void init(ProcessingEnvironment processingEnv) { + super.init(processingEnv); + messager = processingEnv.getMessager(); + filer = processingEnv.getFiler(); + elements = processingEnv.getElementUtils(); + types = processingEnv.getTypeUtils(); + } + + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + // Nothing needs to be done at the end of all rounds. + if (roundEnv.processingOver()) { + return false; + } + + // Nothing to look at in this round. + if (annotations.size() == 0) { + return false; + } + + // We expect to be registered for exactly one annotation. + if (annotations.size() != 1) { + throw new IllegalStateException( + "Unexpected - multiple annotations registered: " + annotations); + } + TypeElement annotation = annotations.iterator().next(); + Set annotated = roundEnv.getElementsAnnotatedWith(annotation); + + // If there are no annotated elements, claim the annotation but do nothing. + if (annotated.size() == 0) { + return true; + } + + // This processor has to aggregate all op classes in one round, as it generates a single Ops + // API class which cannot be modified once generated. If we find an annotation after we've + // generated our code, flag the location of each such class. + if (hasRun) { + for (Element e : annotated) { + error( + e, + "The Operator processor has already processed @Operator annotated sources\n" + + "and written out an Ops API. 
It cannot process additional @Operator sources.\n" + + "One reason this can happen is if other annotation processors generate\n" + + "new @Operator source files."); + } + return true; + } + + // Collect all classes tagged with our annotation. + Multimap groupedMethods = HashMultimap.create(); + if (!collectOpsMethods(roundEnv, groupedMethods, annotation)) { + return true; + } + + // Nothing to do when there are no tagged classes. + if (groupedMethods.isEmpty()) { + return true; + } + + // Validate operator classes and generate Op API. + writeApi(groupedMethods); + + hasRun = true; + return true; + } + + @Override + public Set getSupportedAnnotationTypes() { + return Collections.singleton("org.tensorflow.op.annotation.Operator"); + } + + protected static class OpsSpec { + protected static final Comparator PARAMETER_SPEC_COMPARATOR = + (o1, o2) -> { + if (o1.javaMethod.parameters.size() > o2.javaMethod.parameters.size()) { + return 1; + } + if (o1.javaMethod.parameters.size() < o2.javaMethod.parameters.size()) { + return -1; + } + List firstParams = o1.javaMethod.parameters; + List secondParams = o2.javaMethod.parameters; + for (int i = 0; i < firstParams.size(); i++) { + ParameterSpec first = firstParams.get(i); + ParameterSpec second = secondParams.get(i); + int compare = first.name.compareTo(second.name); + if (compare != 0) { + return compare; + } + } + return 0; + }; + protected static final Comparator METHOD_SPEC_COMPARATOR = + Comparator.comparing((OpMethod m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); + + public final @Nullable OpsSpec parent; + public final String groupName; + public final String fieldName; + public final ClassName className; + public final List methods; + public final List subGroups = new ArrayList<>(); + + OpsSpec( + OpsSpec parent, + String groupName, + String fieldName, + ClassName className, + Collection methods) { + this.parent = parent; + this.groupName = groupName; + this.fieldName = fieldName; + this.className = className; 
+ this.methods = new ArrayList<>(methods); + this.methods.sort(METHOD_SPEC_COMPARATOR); + } + + Iterable javaMethods() { + return methods.stream().map(x -> x.javaMethod).collect(Collectors.toList()); + } + } + + protected static final class OpMethod { + final String name; + final TypeElement opClass; + final ExecutableElement endpointMethod; + final boolean describeByClass; + final boolean deprecated; + final MethodSpec javaMethod; + + public OpMethod( + String name, + TypeElement opClass, + ExecutableElement endpointMethod, + boolean describeByClass, + boolean deprecated, + MethodSpec javaMethod) { + this.name = name; + this.opClass = opClass; + this.endpointMethod = endpointMethod; + this.describeByClass = describeByClass; + this.deprecated = deprecated; + this.javaMethod = javaMethod; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof OpMethod)) { + return false; + } + + OpMethod opMethod = (OpMethod) o; + + return javaMethod.equals(opMethod.javaMethod); + } + + @Override + public int hashCode() { + return javaMethod.hashCode(); + } + } + + protected static final Pattern JAVADOC_TAG_PATTERN = + Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); + protected static final ClassName T_OP = ClassName.get("org.tensorflow.op", "Op"); + protected static final ClassName T_OPS = ClassName.get("org.tensorflow.op", "Ops"); + protected static final TypeName T_ITERABLE_OP = + ParameterizedTypeName.get(ClassName.get(Iterable.class), T_OP); + protected static final ClassName T_OPERATOR = + ClassName.get("org.tensorflow.op.annotation", "Operator"); + protected static final ClassName T_ENDPOINT = + ClassName.get("org.tensorflow.op.annotation", "Endpoint"); + protected static final ClassName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope"); + protected static final ClassName T_EXEC_ENV = + ClassName.get("org.tensorflow", "ExecutionEnvironment"); + protected static final ClassName 
T_EAGER_SESSION = + ClassName.get("org.tensorflow", "EagerSession"); + protected static final ClassName T_STRING = ClassName.get(String.class); + + protected static final String LICENSE = + "Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n" + + "\n" + + "Licensed under the Apache License, Version 2.0 (the \"License\");\n" + + "you may not use this file except in compliance with the License.\n" + + "You may obtain a copy of the License at\n" + + "\n" + + " http://www.apache.org/licenses/LICENSE-2.0\n" + + "\n" + + "Unless required by applicable law or agreed to in writing, software\n" + + "distributed under the License is distributed on an \"AS IS\" BASIS,\n" + + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + + "See the License for the specific language governing permissions and\n" + + "limitations under the License.\n" + + "==============================================================================\n"; + + protected Filer filer; + protected Messager messager; + protected Elements elements; + protected Types types; + protected boolean hasRun = false; + + protected void error(Element e, String message, Object... args) { + if (args != null && args.length > 0) { + message = String.format(message, args); + } + messager.printMessage(Kind.ERROR, message, e); + } + + protected abstract void write(T spec); + + protected void writeApi(Multimap groupedMethods) { + // Build tree of *Ops classes that needs to be generated by this processor. The 'Ops' class + // resides at the root of the tree while other classes are nodes. 
+ OpsSpec ops = new OpsSpec(null, null, null, T_OPS, groupedMethods.removeAll("")); + Collection groupOps = collectGroupOps(ops, groupedMethods); + + write(buildTopClass(ops)); + groupOps.forEach(g -> write(buildGroupClass(g))); + } + + protected boolean collectOpsMethods( + RoundEnvironment roundEnv, + Multimap groupedMethods, + TypeElement annotation) { + boolean result = true; + for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) { + // @Operator can only apply to types, so e must be a TypeElement. + if (!(e instanceof TypeElement)) { + error( + e, + "@Operator can only be applied to classes, but this is a %s", + e.getKind().toString()); + result = false; + continue; + } + collectOpMethods(groupedMethods, (TypeElement) e, annotation); + } + return result; + } + + protected void collectOpMethods( + Multimap groupedMethods, TypeElement opClass, TypeElement annotation) { + boolean opClassDeprecated = opClass.getAnnotation(Deprecated.class) != null; + AnnotationMirror operatorAnnot = getAnnotationMirror(opClass, annotation.getQualifiedName()); + if (operatorAnnot == null) { + throw new IllegalArgumentException( + "Annotation " + + annotation.getSimpleName() + + " not present on element " + + opClass.getSimpleName()); + } + String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); + String opName = getAnnotationElementValueAsString("name", operatorAnnot); + if (Strings.isNullOrEmpty(opName)) { + opName = + CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); + } + // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope + // and, optionally, a list of arguments + for (ExecutableElement opMethod : ElementFilter.methodsIn(opClass.getEnclosedElements())) { + AnnotationMirror endpointAnnot = + getAnnotationMirror(opMethod, elements.getName(T_ENDPOINT.toString())); + if (endpointAnnot != null) { + if (!opMethod.getModifiers().containsAll(Arrays.asList(Modifier.STATIC, 
Modifier.PUBLIC))) { + throw new IllegalArgumentException( + "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); + } + if (opMethod.getParameters().isEmpty() + || !((TypeElement) types.asElement(opMethod.getParameters().get(0).asType())) + .getQualifiedName() + .equals(elements.getName(T_SCOPE.toString()))) { + throw new IllegalArgumentException( + "Endpoint " + + opMethod + + " of class " + + opClass + + " must take an instance of " + + T_SCOPE + + " as its first parameter"); + } + String endpointGroup = getAnnotationElementValueAsString("group", endpointAnnot); + if (endpointGroup.isEmpty()) { + endpointGroup = opGroup; + } + String endpointName = getAnnotationElementValueAsString("name", endpointAnnot); + if (endpointName.isEmpty()) { + endpointName = opName; + } + boolean describeByClass = + getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); + boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; + OpMethod method = + buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); + groupedMethods.put(endpointGroup, method); + } + } + } + + protected OpMethod buildOpMethod( + String methodName, + TypeElement opClass, + ExecutableElement endpointMethod, + boolean describeByClass, + boolean deprecated) { + MethodSpec.Builder builder = + MethodSpec.methodBuilder(methodName) + .addModifiers(Modifier.PUBLIC) + .returns(TypeName.get(endpointMethod.getReturnType())) + .varargs(endpointMethod.isVarArgs()) + .addJavadoc( + "$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); + + if (deprecated) { + builder.addAnnotation(Deprecated.class); + } + for (TypeParameterElement tp : endpointMethod.getTypeParameters()) { + TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType()); + builder.addTypeVariable(tvn); + } + for (TypeMirror thrownType : endpointMethod.getThrownTypes()) { + builder.addException(TypeName.get(thrownType)); + } 
+ StringBuilder call = new StringBuilder(); + if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { + call.append("return "); + } + call.append("$T.").append(endpointMethod.getSimpleName()).append("(scope"); + boolean first = true; + for (VariableElement param : endpointMethod.getParameters()) { + ParameterSpec p = ParameterSpec.get(param); + if (first) { + first = false; + continue; + } + call.append(", "); + call.append(p.name); + builder.addParameter(p); + } + call.append(")"); + builder.addStatement(call.toString(), ClassName.get(opClass)); + return new OpMethod( + methodName, opClass, endpointMethod, describeByClass, deprecated, builder.build()); + } + + protected Javadoc buildOpMethodJavadoc( + TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { + Javadoc methodJavadoc = parseJavadoc(endpointMethod); + + Javadoc javadoc; + + if (!copyClassDescription) { + javadoc = new Javadoc(methodJavadoc.getDescription()); + } else { + javadoc = parseJavadoc(opClass); + } + + // Copy all endpoint method tags to the description, except for the `scope` parameter which + // will be inferred by the Ops class + methodJavadoc + .getBlockTags() + .forEach( + t -> { + if (!(t.getTagName().equals("param") + && t.getName().map(s -> s.equals("scope")).orElse(false))) { + javadoc.addBlockTag(t); + } + }); + + return javadoc; + } + + protected static Collection collectGroupOps( + OpsSpec ops, Multimap groupedMethods) { + Map groups = new HashMap<>(); + + // The `group` label added in the `@Operator` annotation has the same syntax as a package name, + // which (in most + // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In + // this case, + // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, + // and the latter + // should be added as the `linalg` field of the `Ops` root class. 
+ groupedMethods + .keys() + .forEach( + group -> { + OpsSpec parentClass = ops; + int startPos = 0; + do { + int delimiterPos = group.indexOf('.', startPos); + String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); + OpsSpec groupOps = groups.get(groupName); + + // Create spec for this group if we have not encountered it yet in our iteration + if (groupOps == null) { + String fieldName = + delimiterPos < 0 + ? group.substring(startPos) + : group.substring(startPos, delimiterPos); + ClassName className = + ClassName.get( + "org.tensorflow.op", + CaseFormat.LOWER_UNDERSCORE.to( + CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + + "Ops"); + groupOps = + new OpsSpec( + parentClass, + groupName, + fieldName, + className, + groupedMethods.get(groupName)); + parentClass.subGroups.add(groupOps); + groups.put(groupName, groupOps); + } + parentClass = groupOps; + startPos = delimiterPos + 1; + } while (startPos > 0); + }); + + return groups.values(); + } + + protected abstract T buildGroupClass(OpsSpec spec); + + protected abstract T buildTopClass(OpsSpec spec); + + protected static AnnotationMirror getAnnotationMirror(Element element, Name annotationName) { + for (AnnotationMirror am : element.getAnnotationMirrors()) { + if (((TypeElement) am.getAnnotationType().asElement()) + .getQualifiedName() + .equals(annotationName)) { + return am; + } + } + return null; + } + + protected static AnnotationValue getAnnotationElementValue( + String elementName, AnnotationMirror am) { + for (Map.Entry entry : + am.getElementValues().entrySet()) { + if (entry.getKey().getSimpleName().contentEquals(elementName)) { + return entry.getValue(); + } + } + return null; + } + + protected static String getAnnotationElementValueAsString( + String elementName, AnnotationMirror am) { + AnnotationValue value = getAnnotationElementValue(elementName, am); + return value != null ? 
value.getValue().toString() : ""; + } + + protected static boolean getAnnotationElementValueAsBoolean( + String elementName, AnnotationMirror am, boolean defaultValue) { + AnnotationValue value = getAnnotationElementValue(elementName, am); + return value != null ? Boolean.parseBoolean(value.toString()) : defaultValue; + } + + protected Javadoc parseJavadoc(Element element) { + String docComment = elements.getDocComment(element); + JavadocComment javadocComment; + if (docComment != null) { + javadocComment = new JavadocComment(docComment); + } else { + javadocComment = new JavadocComment(); + } + return javadocComment.parse(); + } +} diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 99277e8fe24..b07029a48e8 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -15,53 +15,14 @@ */ package org.tensorflow.processor.operator; -import com.github.javaparser.ast.comments.JavadocComment; -import com.github.javaparser.javadoc.Javadoc; -import com.google.common.base.CaseFormat; -import com.google.common.base.Strings; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; -import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import com.squareup.javapoet.TypeVariableName; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import 
java.util.Comparator; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.regex.Pattern; -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.Filer; -import javax.annotation.processing.Messager; -import javax.annotation.processing.ProcessingEnvironment; -import javax.annotation.processing.RoundEnvironment; -import javax.lang.model.SourceVersion; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.TypeParameterElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.NoType; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; -import javax.lang.model.util.ElementFilter; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; -import javax.tools.Diagnostic.Kind; import org.tensorflow.Names; /** @@ -75,159 +36,10 @@ * "aggregating" API, and annotation processing does not permit modifying an already generated * class. */ -public final class OperatorProcessor extends AbstractProcessor { +public final class OperatorProcessor extends BaseOperatorProcessor { @Override - public SourceVersion getSupportedSourceVersion() { - return SourceVersion.latest(); - } - - @Override - public synchronized void init(ProcessingEnvironment processingEnv) { - super.init(processingEnv); - messager = processingEnv.getMessager(); - filer = processingEnv.getFiler(); - elements = processingEnv.getElementUtils(); - types = processingEnv.getTypeUtils(); - } - - @Override - public boolean process(Set annotations, RoundEnvironment roundEnv) { - // Nothing needs to be done at the end of all rounds. 
- if (roundEnv.processingOver()) { - return false; - } - - // Nothing to look at in this round. - if (annotations.size() == 0) { - return false; - } - - // We expect to be registered for exactly one annotation. - if (annotations.size() != 1) { - throw new IllegalStateException( - "Unexpected - multiple annotations registered: " + annotations); - } - TypeElement annotation = annotations.iterator().next(); - Set annotated = roundEnv.getElementsAnnotatedWith(annotation); - - // If there are no annotated elements, claim the annotation but do nothing. - if (annotated.size() == 0) { - return true; - } - - // This processor has to aggregate all op classes in one round, as it generates a single Ops - // API class which cannot be modified once generated. If we find an annotation after we've - // generated our code, flag the location of each such class. - if (hasRun) { - for (Element e : annotated) { - error( - e, - "The Operator processor has already processed @Operator annotated sources\n" - + "and written out an Ops API. It cannot process additional @Operator sources.\n" - + "One reason this can happen is if other annotation processors generate\n" - + "new @Operator source files."); - } - return true; - } - - // Collect all classes tagged with our annotation. - Multimap groupedMethods = HashMultimap.create(); - if (!collectOpsMethods(roundEnv, groupedMethods, annotation)) { - return true; - } - - // Nothing to do when there are no tagged classes. - if (groupedMethods.isEmpty()) { - return true; - } - - // Validate operator classes and generate Op API. 
- writeApi(groupedMethods); - - hasRun = true; - return true; - } - - @Override - public Set getSupportedAnnotationTypes() { - return Collections.singleton("org.tensorflow.op.annotation.Operator"); - } - - private static class OpsSpec { - - private static final Comparator PARAMETER_SPEC_COMPARATOR = - (o1, o2) -> { - if (o1.parameters.size() > o2.parameters.size()) { - return 1; - } - if (o1.parameters.size() < o2.parameters.size()) { - return -1; - } - List firstParams = o1.parameters; - List secondParams = o2.parameters; - for (int i = 0; i < firstParams.size(); i++) { - ParameterSpec first = firstParams.get(i); - ParameterSpec second = secondParams.get(i); - int compare = first.name.compareTo(second.name); - if (compare != 0) { - return compare; - } - } - return 0; - }; - private static final Comparator METHOD_SPEC_COMPARATOR = - Comparator.comparing((MethodSpec m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); - - final String groupName; - final String fieldName; - final ClassName className; - final List methods; - final List subGroups = new ArrayList<>(); - - OpsSpec( - String groupName, String fieldName, ClassName className, Collection methods) { - this.groupName = groupName; - this.fieldName = fieldName; - this.className = className; - this.methods = new ArrayList<>(methods); - this.methods.sort(METHOD_SPEC_COMPARATOR); - } - } - - private static final Pattern JAVADOC_TAG_PATTERN = - Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); - - private static final String LICENSE = - "Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n" - + "\n" - + "Licensed under the Apache License, Version 2.0 (the \"License\");\n" - + "you may not use this file except in compliance with the License.\n" - + "You may obtain a copy of the License at\n" - + "\n" - + " http://www.apache.org/licenses/LICENSE-2.0\n" - + "\n" - + "Unless required by applicable law or agreed to in writing, software\n" - + "distributed under the License is distributed on an \"AS IS\" BASIS,\n" - + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" - + "See the License for the specific language governing permissions and\n" - + "limitations under the License.\n" - + "==============================================================================\n"; - - private Filer filer; - private Messager messager; - private Elements elements; - private Types types; - private boolean hasRun = false; - - private void error(Element e, String message, Object... args) { - if (args != null && args.length > 0) { - message = String.format(message, args); - } - messager.printMessage(Kind.ERROR, message, e); - } - - private void write(TypeSpec spec) { + protected void write(TypeSpec spec) { try { JavaFile.builder("org.tensorflow.op", spec) .addFileComment(LICENSE) @@ -240,213 +52,8 @@ private void write(TypeSpec spec) { } } - private void writeApi(Multimap groupedMethods) { - // Build tree of *Ops classes that needs to be generated by this processor. The 'Ops' class - // resides at the root of the tree while other classes are nodes. 
- OpsSpec ops = new OpsSpec(null, null, Names.Ops, groupedMethods.removeAll("")); - Collection groupOps = collectGroupOps(ops, groupedMethods); - - write(buildTopClass(ops)); - groupOps.forEach(g -> write(buildGroupClass(g))); - } - - private boolean collectOpsMethods( - RoundEnvironment roundEnv, - Multimap groupedMethods, - TypeElement annotation) { - boolean result = true; - for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) { - // @Operator can only apply to types, so e must be a TypeElement. - if (!(e instanceof TypeElement)) { - error( - e, - "@Operator can only be applied to classes, but this is a %s", - e.getKind().toString()); - result = false; - continue; - } - collectOpMethods(groupedMethods, (TypeElement) e, annotation); - } - return result; - } - - private void collectOpMethods( - Multimap groupedMethods, TypeElement opClass, TypeElement annotation) { - boolean opClassDeprecated = opClass.getAnnotation(Deprecated.class) != null; - AnnotationMirror operatorAnnot = getAnnotationMirror(opClass, annotation.getQualifiedName()); - if (operatorAnnot == null) { - throw new IllegalArgumentException( - "Annotation " - + annotation.getSimpleName() - + " not present on element " - + opClass.getSimpleName()); - } - String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); - String opName = getAnnotationElementValueAsString("name", operatorAnnot); - if (Strings.isNullOrEmpty(opName)) { - opName = - CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); - } - // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope - // and, optionally, a list of arguments - for (ExecutableElement opMethod : ElementFilter.methodsIn(opClass.getEnclosedElements())) { - AnnotationMirror endpointAnnot = - getAnnotationMirror(opMethod, elements.getName(Names.Endpoint.toString())); - if (endpointAnnot != null) { - if (!opMethod.getModifiers().containsAll(Arrays.asList(Modifier.STATIC, 
Modifier.PUBLIC))) { - throw new IllegalArgumentException( - "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); - } - if (opMethod.getParameters().isEmpty() - || !((TypeElement) types.asElement(opMethod.getParameters().get(0).asType())) - .getQualifiedName() - .equals(elements.getName(Names.Scope.toString()))) { - throw new IllegalArgumentException( - "Endpoint " - + opMethod - + " of class " - + opClass - + " must take an instance of " - + Names.Scope - + " as its first parameter"); - } - String endpointGroup = getAnnotationElementValueAsString("group", endpointAnnot); - if (endpointGroup.isEmpty()) { - endpointGroup = opGroup; - } - String endpointName = getAnnotationElementValueAsString("name", endpointAnnot); - if (endpointName.isEmpty()) { - endpointName = opName; - } - boolean describeByClass = - getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); - boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; - MethodSpec method = - buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); - groupedMethods.put(endpointGroup, method); - } - } - } - - private MethodSpec buildOpMethod( - String methodName, - TypeElement opClass, - ExecutableElement endpointMethod, - boolean describeByClass, - boolean deprecated) { - MethodSpec.Builder builder = - MethodSpec.methodBuilder(methodName) - .addModifiers(Modifier.PUBLIC) - .returns(TypeName.get(endpointMethod.getReturnType())) - .varargs(endpointMethod.isVarArgs()) - .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass)); - - if (deprecated) { - builder.addAnnotation(Deprecated.class); - } - for (TypeParameterElement tp : endpointMethod.getTypeParameters()) { - TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType()); - builder.addTypeVariable(tvn); - } - for (TypeMirror thrownType : endpointMethod.getThrownTypes()) { - builder.addException(TypeName.get(thrownType)); - } - 
StringBuilder call = new StringBuilder(); - if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { - call.append("return "); - } - call.append("$T.").append(endpointMethod.getSimpleName()).append("(scope"); - boolean first = true; - for (VariableElement param : endpointMethod.getParameters()) { - ParameterSpec p = ParameterSpec.get(param); - if (first) { - first = false; - continue; - } - call.append(", "); - call.append(p.name); - builder.addParameter(p); - } - call.append(")"); - builder.addStatement(call.toString(), ClassName.get(opClass)); - return builder.build(); - } - - private String buildOpMethodJavadoc( - TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { - Javadoc methodJavadoc = parseJavadoc(endpointMethod); - - Javadoc javadoc; - - if (!copyClassDescription) { - javadoc = new Javadoc(methodJavadoc.getDescription()); - } else { - javadoc = parseJavadoc(opClass); - } - - // Copy all endpoint method tags to the description, except for the `scope` parameter which - // will be inferred by the Ops class - methodJavadoc - .getBlockTags() - .forEach( - t -> { - if (!(t.getTagName().equals("param") - && t.getName().map(s -> s.equals("scope")).orElse(false))) { - javadoc.addBlockTag(t); - } - }); - - return javadoc.toText(); - } - - private static Collection collectGroupOps( - OpsSpec ops, Multimap groupedMethods) { - Map groups = new HashMap<>(); - - // The `group` label added in the `@Operator` annotation has the same syntax as a package name, - // which (in most - // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In - // this case, - // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, - // and the latter - // should be added as the `linalg` field of the `Ops` root class. 
- groupedMethods - .keys() - .forEach( - group -> { - OpsSpec parentClass = ops; - int startPos = 0; - do { - int delimiterPos = group.indexOf('.', startPos); - String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); - OpsSpec groupOps = groups.get(groupName); - - // Create spec for this group if we have not encountered it yet in our iteration - if (groupOps == null) { - String fieldName = - delimiterPos < 0 - ? group.substring(startPos) - : group.substring(startPos, delimiterPos); - ClassName className = - ClassName.get( - "org.tensorflow.op", - CaseFormat.LOWER_UNDERSCORE.to( - CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) - + "Ops"); - groupOps = - new OpsSpec(groupName, fieldName, className, groupedMethods.get(groupName)); - parentClass.subGroups.add(groupOps); - groups.put(groupName, groupOps); - } - parentClass = groupOps; - startPos = delimiterPos + 1; - } while (startPos > 0); - }); - - return groups.values(); - } - - private static TypeSpec buildGroupClass(OpsSpec spec) { + @Override + protected TypeSpec buildGroupClass(OpsSpec spec) { // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = @@ -464,7 +71,7 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { spec.groupName, Names.Op, Names.Ops) - .addMethods(spec.methods); + .addMethods(spec.javaMethods()); MethodSpec.Builder opsBuilder = MethodSpec.methodBuilder("ops") @@ -490,7 +97,8 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { return builder.build(); } - private static TypeSpec buildTopClass(OpsSpec spec) { + @Override + protected TypeSpec buildTopClass(OpsSpec spec) { // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = @@ -500,6 +108,7 @@ private static TypeSpec buildTopClass(OpsSpec spec) { TypeSpec.Builder opsBuilder = TypeSpec.classBuilder("Ops") + .addSuperinterface(Names.WithOps) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) .addJavadoc( "An API for building 
operations as {@link $T Op}s\n

\n" @@ -532,22 +141,28 @@ private static TypeSpec buildTopClass(OpsSpec spec) { + "}\n", Names.Op, Names.Operator) - .addMethods(spec.methods); + .addMethods(spec.javaMethods()); addGroupFields(opsBuilder, ctorBuilder, spec.subGroups, true); opsBuilder.addMethod(ctorBuilder.build()); + opsBuilder.addMethod( + MethodSpec.methodBuilder("tf") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(Names.Ops) + .addStatement("return this") + .build()); + opsBuilder.addMethod( MethodSpec.methodBuilder("withSubScope") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.String, "childScopeName") .returns(Names.Ops) .addStatement("return new $T(scope.withSubScope(childScopeName))", Names.Ops) - .addJavadoc( - "Returns an API that builds operations with the provided name prefix.\n" - + "\n@see {@link $T#withSubScope(String)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); String initScopeComment = @@ -586,37 +201,31 @@ private static TypeSpec buildTopClass(OpsSpec spec) { opsBuilder.addMethod( MethodSpec.methodBuilder("withName") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.String, "opName") .returns(Names.Ops) .addStatement("return new Ops(scope.withName(opName))") - .addJavadoc( - "Returns an API that uses the provided name for an op.\n\n" - + "@see {@link $T#withName(String)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("withDevice") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.DeviceSpec, "deviceSpec") .returns(Names.Ops) .addStatement("return new Ops(scope.withDevice(deviceSpec))") - .addJavadoc( - "Returns an API that places the created operations on the device(s) matching the provided spec.\n\n" - + "@see {@link $T#withDevice(DeviceSpec)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( 
MethodSpec.methodBuilder("withControlDependencies") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.IterableOp, "controls") .returns(Names.Ops) .addStatement("return new Ops(scope.withControlDependencies(controls))") - .addJavadoc( - "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencies(Iterable>)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( @@ -700,6 +309,8 @@ private static void addGroupFields( boolean isTopClass) { groups.forEach( group -> { + System.out.println( + "Adding field in " + classBuilder.build().name + ": " + group.fieldName); classBuilder.addField( FieldSpec.builder(group.className, group.fieldName) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) @@ -712,48 +323,4 @@ private static void addGroupFields( .build(); }); } - - private static AnnotationMirror getAnnotationMirror(Element element, Name annotationName) { - for (AnnotationMirror am : element.getAnnotationMirrors()) { - if (((TypeElement) am.getAnnotationType().asElement()) - .getQualifiedName() - .equals(annotationName)) { - return am; - } - } - return null; - } - - private static AnnotationValue getAnnotationElementValue( - String elementName, AnnotationMirror am) { - for (Map.Entry entry : - am.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals(elementName)) { - return entry.getValue(); - } - } - return null; - } - - private static String getAnnotationElementValueAsString(String elementName, AnnotationMirror am) { - AnnotationValue value = getAnnotationElementValue(elementName, am); - return value != null ? value.getValue().toString() : ""; - } - - private static boolean getAnnotationElementValueAsBoolean( - String elementName, AnnotationMirror am, boolean defaultValue) { - AnnotationValue value = getAnnotationElementValue(elementName, am); - return value != null ? 
Boolean.parseBoolean(value.toString()) : defaultValue; - } - - private Javadoc parseJavadoc(Element element) { - String docComment = elements.getDocComment(element); - JavadocComment javadocComment; - if (docComment != null) { - javadocComment = new JavadocComment(docComment); - } else { - javadocComment = new JavadocComment(); - } - return javadocComment.parse(); - } } diff --git a/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml b/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml index 812a53d129b..fed50858f48 100644 --- a/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-platform-mkl-gpu TensorFlow Core API Library Platform MKL GPU diff --git a/tensorflow-core/tensorflow-core-platform-mkl/pom.xml b/tensorflow-core/tensorflow-core-platform-mkl/pom.xml index 9800ff1cb95..0c855068865 100644 --- a/tensorflow-core/tensorflow-core-platform-mkl/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-mkl/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-platform-mkl TensorFlow Core API Library Platform MKL diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java index ea73f764a38..e258330df70 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java @@ -66,8 +66,8 @@ public Operand call(Ops tf, Operand dims, Class type) { if (shape.numDimensions() != 2) { throw new IllegalArgumentException("2D matrix required, got " + shape.numDimensions()); } - boolean isSquare = shape.size(0) == shape.size(1); - long diagSize = Math.min(shape.size(0), shape.size(1)); + boolean isSquare = 
shape.get(0) == shape.get(1); + long diagSize = Math.min(shape.get(0), shape.get(1)); Shape diagShape = Shape.of(diagSize); Operand op; @@ -79,8 +79,8 @@ public Operand call(Ops tf, Operand dims, Class type) { tf.linalg.matrixDiag( diagOnes, tf.constant(0), // don't cast here, expecting TInt32 - tf.constant((int) shape.size(0)), - tf.constant((int) shape.size(1)), + tf.constant((int) shape.get(0)), + tf.constant((int) shape.get(1)), zero); } else { Operand zeroMatrix = tf.zeros(dims, type); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java index 240d915f97f..a24b791fd47 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java @@ -91,8 +91,8 @@ public Operand call(Ops tf, Operand dims, Class type) { } long numRows = 1; int i = 0; - for (; i < dimsShape.numDimensions() - 1; i++) numRows *= dimsShape.size(i); - long numCols = dimsShape.size(i); + for (; i < dimsShape.numDimensions() - 1; i++) numRows *= dimsShape.get(i); + long numCols = dimsShape.get(i); Shape flatShape = Shape.of(Math.max(numRows, numCols), Math.min(numRows, numCols)); long[] seeds = {seed, 0}; Operand op = diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index d23059b88fd..f01ce2e75e0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -572,7 +572,7 @@ public static Operand sparseCategoricalCrossentropy( tf.reshape( predictions, tf.constant( - new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); + new long[] {-1L, 
predictionsShape.get(predictionsShape.numDimensions() - 1)})); } Operand loss = ftf.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); @@ -648,7 +648,7 @@ private static Operand smoothCategoricalLabels( Operand smoothing = cast(tf, tf.constant(labelSmoothing), labelType); Shape labelsShape = labels.shape(); int numDims = labelsShape.numDimensions(); - Operand numClasses = cast(tf, tf.constant(labelsShape.size(numDims - 1)), labelType); + Operand numClasses = cast(tf, tf.constant(labelsShape.get(numDims - 1)), labelType); Operand oneMinusSmoothing = cast(tf, tf.constant(1.f - labelSmoothing), labelType); return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java index f6b0de71b0d..11c838277a4 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java @@ -14,6 +14,10 @@ =======================================================================*/ package org.tensorflow.framework.losses.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.Arrays; +import java.util.Collections; import org.tensorflow.Operand; import org.tensorflow.framework.losses.Reduction; import org.tensorflow.ndarray.Shape; @@ -26,11 +30,6 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import java.util.Arrays; -import java.util.Collections; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * These are helper methods for Losses and Metrics and will be module private when Java modularity * is applied to TensorFlow Java. 
These methods should not be used outside of the losses and metrics @@ -101,7 +100,7 @@ public static LossTuple squeezeOrExpandDimensions( long labelsRank = labelsShape.numDimensions(); if (labelsRank != Shape.UNKNOWN_SIZE && predictionsRank != Shape.UNKNOWN_SIZE) { // Use static rank for 'label' and 'prediction'. - if (predictionsRank - labelsRank != 1 || predictionsShape.size(-1) == 1) { + if (predictionsRank - labelsRank != 1 || predictionsShape.get(-1) == 1) { lossTuple = removeSqueezableDimensions(tf, labels, predictions); } } else { // use dynamic rank @@ -213,9 +212,9 @@ public static LossTuple removeSqueezableDimensions( if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { // Use static rank. int rankDiff = predictionsRank - labelsRank; - if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.get(-1), 1)) { predictions = tf.squeeze(predictions); - } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.get(-1), 1)) { labels = tf.squeeze(labels); } return new LossTuple<>(labels, predictions); @@ -224,7 +223,7 @@ public static LossTuple removeSqueezableDimensions( // TODO: hold for lazy select feature, // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); - if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.get(-1), 1)) { /* * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), @@ -232,7 +231,7 @@ public static LossTuple removeSqueezableDimensions( */ predictions = tf.squeeze(predictions, Squeeze.axis(Collections.singletonList(-1L))); } - 
if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.get(-1), 1)) { /* * TODO, if we ever get a select that does lazy evaluation labels = tf.select( * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), tf.squeeze(labels, diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 70a81da8d1e..d9e96081233 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -110,8 +110,8 @@ public static Op assertBroadcastable( } for (int i = 0; i < valuesRankStatic; i++) { - if (valuesShapeStatic.size(i) != weightsShapeStatic.size(i) - && weightsShapeStatic.size(i) != 1) { + if (valuesShapeStatic.get(i) != weightsShapeStatic.get(i) + && weightsShapeStatic.get(i) != 1) { throw new NotBroadcastableException( String.format( "%s Mismatch at dim %d. 
values.shape=%s weights.shape=%s.", diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b1e2ce6c928..8bcd38bb7d6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -97,8 +97,8 @@ public static Operand sigmoidCrossEntropyWithLogits( private static boolean isCompatible(Shape shape, Shape other) { if (shape.numDimensions() != other.numDimensions()) return false; for (int i = 0; i < shape.numDimensions(); i++) { - long aShapeDim = shape.size(i); - long bShapeDim = other.size(i); + long aShapeDim = shape.get(i); + long bShapeDim = other.get(i); if (aShapeDim == bShapeDim || (aShapeDim == Shape.UNKNOWN_SIZE || bShapeDim == Shape.UNKNOWN_SIZE)) { continue; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index a95110c9a96..5e3ed52a220 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -21,6 +21,7 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; +@Operator(group = "nn") public class SoftmaxCrossEntropyWithLogits { /** @@ -137,10 +138,10 @@ public static Operand softmaxCrossEntr axis = shape.numDimensions() + axis; } for (int i = 0; i < axis; i++) { - newArray[i] = shape.size(i); + newArray[i] = shape.get(i); } for (int i = axis + 1; i < shape.numDimensions(); i++) { - newArray[i - 1] = shape.size(i); + newArray[i - 1] = shape.get(i); } cost = Reshape.create(scope, 
cost, Constant.vectorOf(scope, newArray)); } @@ -165,7 +166,7 @@ private static Operand flattenOuterDims(Scope scope, Oper long product = 1L; boolean productValid = true; for (int i = ndims - 2; i >= 0; i--) { - long d = shape.size(i); + long d = shape.get(i); if (d == Shape.UNKNOWN_SIZE) { productValid = false; break; @@ -173,7 +174,7 @@ private static Operand flattenOuterDims(Scope scope, Oper product *= d; } if (productValid) { - return Reshape.create(scope, logits, Constant.arrayOf(scope, product, shape.size(-1))); + return Reshape.create(scope, logits, Constant.arrayOf(scope, product, shape.get(-1))); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 5299efcce22..3c196641878 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -19,6 +19,7 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; +@Operator(group = "nn") public class SparseSoftmaxCrossEntropyWithLogits { /** @@ -139,7 +140,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( } // Reshape logits to 2 dims, labels to 1 dim. 
- long numClassses = logitsShape.size(-1); + long numClassses = logitsShape.get(-1); preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java index c0c0f12fbf9..f9842e628a0 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java @@ -14,12 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.utils; -import org.tensorflow.ndarray.*; - import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import org.tensorflow.ndarray.*; // TODO used in the Callbacks, this should be a part of NDArray? 
@@ -75,7 +74,7 @@ private static long[] getCoordinates(Shape shape, long index) { int numDims = shape.numDimensions(); int i = numDims - 1; for (; i >= 0; i--) { - long size = shape.size(i); + long size = shape.get(i); long mod = index % size; coordinates[i] = mod; index -= mod; @@ -676,7 +675,7 @@ public static FloatNdArray sum(FloatNdArray a, int axis, boolean keepDims) { int nDims = shape.numDimensions(); int xis = nDims - 1 - axis; long totalSize = shape.size(); - long axisSize = shape.size(xis); + long axisSize = shape.get(xis); final float[] sums = new float[(int) axisSize]; a.scalars() @@ -767,7 +766,7 @@ public static DoubleNdArray sum(DoubleNdArray a, int axis, boolean keepDims) { int nDims = shape.numDimensions(); int xis = nDims - 1 - axis; long totalSize = shape.size(); - long axisSize = shape.size(xis); + long axisSize = shape.get(xis); final double[] sums = new double[(int) axisSize]; a.scalars() diff --git a/tensorflow-kotlin-parent/.editorconfig b/tensorflow-kotlin-parent/.editorconfig new file mode 100644 index 00000000000..c5d853001f9 --- /dev/null +++ b/tensorflow-kotlin-parent/.editorconfig @@ -0,0 +1,93 @@ +# This .editorconfig section approximates ktfmt's formatting rules. You can include it in an +# existing .editorconfig file or use it standalone by copying it to /.editorconfig +# and making sure your editor is set to read settings from .editorconfig files. +# +# It includes editor-specific config options for IntelliJ IDEA. 
+#
+# If any option is wrong, PRs are welcome

[{*.kt,*.kts}]
+indent_style = space
+insert_final_newline = true
+max_line_length = 100
+indent_size = 2
+ij_continuation_indent_size = 4
+ij_java_names_count_to_use_import_on_demand = 9999
+ij_kotlin_align_in_columns_case_branch = false
+ij_kotlin_align_multiline_binary_operation = false
+ij_kotlin_align_multiline_extends_list = false
+ij_kotlin_align_multiline_method_parentheses = false
+ij_kotlin_align_multiline_parameters = true
+ij_kotlin_align_multiline_parameters_in_calls = false
+ij_kotlin_allow_trailing_comma = true
+ij_kotlin_allow_trailing_comma_on_call_site = true
+ij_kotlin_assignment_wrap = normal
+ij_kotlin_blank_lines_after_class_header = 0
+ij_kotlin_blank_lines_around_block_when_branches = 0
+ij_kotlin_blank_lines_before_declaration_with_comment_or_annotation_on_separate_line = 1
+ij_kotlin_block_comment_at_first_column = true
+ij_kotlin_call_parameters_new_line_after_left_paren = true
+ij_kotlin_call_parameters_right_paren_on_new_line = false
+ij_kotlin_call_parameters_wrap = on_every_item
+ij_kotlin_catch_on_new_line = false
+ij_kotlin_class_annotation_wrap = split_into_lines
+ij_kotlin_code_style_defaults = KOTLIN_OFFICIAL
+ij_kotlin_continuation_indent_for_chained_calls = true
+ij_kotlin_continuation_indent_for_expression_bodies = true
+ij_kotlin_continuation_indent_in_argument_lists = true
+ij_kotlin_continuation_indent_in_elvis = false
+ij_kotlin_continuation_indent_in_if_conditions = false
+ij_kotlin_continuation_indent_in_parameter_lists = false
+ij_kotlin_continuation_indent_in_supertype_lists = false
+ij_kotlin_else_on_new_line = false
+ij_kotlin_enum_constants_wrap = off
+ij_kotlin_extends_list_wrap = normal
+ij_kotlin_field_annotation_wrap = split_into_lines
+ij_kotlin_finally_on_new_line = false
+ij_kotlin_if_rparen_on_new_line = false
+ij_kotlin_import_nested_classes = false
+ij_kotlin_insert_whitespaces_in_simple_one_line_method = true
+ij_kotlin_keep_blank_lines_before_right_brace = 
2 +ij_kotlin_keep_blank_lines_in_code = 2 +ij_kotlin_keep_blank_lines_in_declarations = 2 +ij_kotlin_keep_first_column_comment = true +ij_kotlin_keep_indents_on_empty_lines = false +ij_kotlin_keep_line_breaks = true +ij_kotlin_lbrace_on_next_line = false +ij_kotlin_line_comment_add_space = false +ij_kotlin_line_comment_at_first_column = true +ij_kotlin_method_annotation_wrap = split_into_lines +ij_kotlin_method_call_chain_wrap = normal +ij_kotlin_method_parameters_new_line_after_left_paren = true +ij_kotlin_method_parameters_right_paren_on_new_line = true +ij_kotlin_method_parameters_wrap = on_every_item +ij_kotlin_name_count_to_use_star_import = 9999 +ij_kotlin_name_count_to_use_star_import_for_members = 9999 +ij_kotlin_parameter_annotation_wrap = off +ij_kotlin_space_after_comma = true +ij_kotlin_space_after_extend_colon = true +ij_kotlin_space_after_type_colon = true +ij_kotlin_space_before_catch_parentheses = true +ij_kotlin_space_before_comma = false +ij_kotlin_space_before_extend_colon = true +ij_kotlin_space_before_for_parentheses = true +ij_kotlin_space_before_if_parentheses = true +ij_kotlin_space_before_lambda_arrow = true +ij_kotlin_space_before_type_colon = false +ij_kotlin_space_before_when_parentheses = true +ij_kotlin_space_before_while_parentheses = true +ij_kotlin_spaces_around_additive_operators = true +ij_kotlin_spaces_around_assignment_operators = true +ij_kotlin_spaces_around_equality_operators = true +ij_kotlin_spaces_around_function_type_arrow = true +ij_kotlin_spaces_around_logical_operators = true +ij_kotlin_spaces_around_multiplicative_operators = true +ij_kotlin_spaces_around_range = false +ij_kotlin_spaces_around_relational_operators = true +ij_kotlin_spaces_around_unary_operator = false +ij_kotlin_spaces_around_when_arrow = true +ij_kotlin_variable_annotation_wrap = off +ij_kotlin_while_on_new_line = false +ij_kotlin_wrap_elvis_expressions = 1 +ij_kotlin_wrap_expression_body_functions = 1 +ij_kotlin_wrap_first_method_in_call_chain = 
false \ No newline at end of file diff --git a/tensorflow-kotlin-parent/README.md b/tensorflow-kotlin-parent/README.md new file mode 100644 index 00000000000..c2c15eebf00 --- /dev/null +++ b/tensorflow-kotlin-parent/README.md @@ -0,0 +1,7 @@ +# Kotlin API + +This is the home of the Kotlin API for TensorFlow Java. The API lives in `tensorflow-core-api`, and uses the annotation processor in `tensorflow-core-generator`. + +There is no framework wrapper yet, as most of the framework classes work fine from Kotlin, but if there is a need one could be addded. + +For contributing guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md#kotlin-api). diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml new file mode 100644 index 00000000000..a4997623eb5 --- /dev/null +++ b/tensorflow-kotlin-parent/pom.xml @@ -0,0 +1,107 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-java + 0.5.0-SNAPSHOT + + tensorflow-kotlin-parent + pom + + TensorFlow Kotlin Parent + Parent POM of TensorFlow Kotlin artifacts + + + tensorflow-kotlin-generator + tensorflow-core-kotlin + tensorflow-framework-kotlin + tensorflow-kotlin + tensorflow-kotlin-jupyter + tensorflow-core-kotlin-jupyter + + + + + org.jetbrains.kotlin + kotlin-stdlib-jdk8 + ${kotlin.version} + + + + + 1.6.10 + 0.11.0-40 + 0.30 + 1.8 + + + + + jdk11 + + 11 + + + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + ${kotlin.jvmTarget} + + + + + compile + + compile + + + + + test-compile + + test-compile + + + + + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + ${ktfmt.version} + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml new file mode 100644 index 00000000000..1207e14aae6 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml @@ -0,0 +1,78 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 
0.5.0-SNAPSHOT + + tensorflow-core-kotlin-jupyter + jar + + TensorFlow Core Kotlin Jupyter Integration + Kotlin Jupyter integration for tensorflow-core + + + + ${project.version} + + + + + org.jetbrains.kotlinx + kotlin-jupyter-api + ${kotlin_jupyter.version} + + + org.tensorflow + tensorflow-core-kotlin + ${project.version} + + + + + ${project.basedir}/src/main/kotlin + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt new file mode 100644 index 00000000000..b1219be2b9f --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt @@ -0,0 +1,44 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+======================================================================= + +*/ +package org.tensorflow.jupyter + +import org.jetbrains.kotlinx.jupyter.api.declare +import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration +import org.tensorflow.EagerSession +import org.tensorflow.Operand +import org.tensorflow.op.Op +import org.tensorflow.op.kotlin.tf + +public class TensorflowKotlinCoreIntegration : JupyterIntegration() { + override fun Builder.onLoaded() { + import( + "org.tensorflow.*", + "org.tensorflow.op.*", + "org.tensorflow.op.kotlin.*", + "org.tensorflow.types.*", + "org.tensorflow.types.family.*", + "org.tensorflow.ndarray.*", + "org.tensorflow.ndarray.index.*") + + render> { it.asOutput().toString() } + render { it.op().toString() } + + // TODO add a implicit receiver of EagerSession.getDefault() instead + onLoaded { declare("tf" to EagerSession.getDefault().tf) } + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json new file mode 100644 index 00000000000..54d29d383b3 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json @@ -0,0 +1,6 @@ +{ + "definitions":[], + "producers": [ + { "fqn" : "org.tensorflow.jupyter.TensorflowKotlinCoreIntegration" } + ] +} \ No newline at end of file diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml new file mode 100644 index 00000000000..7ff643f2662 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -0,0 +1,163 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.5.0-SNAPSHOT + + tensorflow-core-kotlin + jar + + TensorFlow Core Kotlin API Library + Kotlin API wrappers for the TensorFlow core 
Java library + + + + + + + + org.tensorflow + tensorflow-core-api + ${project.version} + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.openjdk.jmh + jmh-core + test + + + org.openjdk.jmh + jmh-generator-annprocess + test + + + org.jetbrains.kotlin + kotlin-test-junit5 + ${kotlin.version} + test + + + + org.tensorflow + tensorflow-core-platform${javacpp.platform.extension} + ${project.version} + test + + + + + ${project.basedir}/src/main/kotlin + ${project.basedir}/src/test/kotlin + + + org.codehaus.mojo + build-helper-maven-plugin + 3.0.0 + + + + add-gen-sources + generate-sources + + add-source + + + + ${project.basedir}/src/gen/annotations + + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + kapt + + kapt + + + + ${project.basedir}/src/main/kotlin + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/java + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/annotations + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/main/java + + + org.tensorflow.processor.operator.KotlinOpsProcessor + + + + org.tensorflow + tensorflow-kotlin-generator + ${project.version} + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.2 + + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt new file mode 100644 index 00000000000..00608480fde --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -0,0 +1,221 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.audio.AudioSpectrogram +import org.tensorflow.op.audio.DecodeWav +import org.tensorflow.op.audio.EncodeWav +import org.tensorflow.op.audio.Mfcc +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TString + +/** + * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class AudioOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.AudioOps = ops.java.audio + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Produces a visualization of audio data over time. + * Spectrograms are a standard way of representing audio information as a series of + * slices of frequency information, one slice for each window of time. By joining + * these together into a sequence, they form a distinctive fingerprint of the sound + * over time. 
+ * + * This op expects to receive audio data as an input, stored as floats in the range + * -1 to 1, together with a window width in samples, and a stride specifying how + * far to move the window between slices. From this it generates a three + * dimensional output. The first dimension is for the channels in the input, so a + * stereo audio input would have two here for example. The second dimension is time, + * with successive frequency slices. The third dimension has an amplitude value for + * each frequency during that time slice. + * + * This means the layout when converted and saved as an image is rotated 90 degrees + * clockwise from a typical spectrogram. Time is descending down the Y axis, and + * the frequency decreases from left to right. + * + * Each value in the result represents the square root of the sum of the real and + * imaginary parts of an FFT on the current window of samples. In this way, the + * lowest dimension represents the power of each frequency in the current window, + * and adjacent windows are concatenated in the next dimension. + * + * To get a more intuitive and visual look at what this operation does, you can run + * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the + * resulting spectrogram as a PNG image. + * + * @param input Float representation of audio data. + * @param windowSize How wide the input window is in samples. For the highest efficiency + * this should be a power of two, but other values are accepted. + * @param stride How widely apart the center of adjacent sample windows should be. + * @param options carries optional attribute values + * @return a new instance of AudioSpectrogram + * @see org.tensorflow.op.AudioOps.audioSpectrogram + * @param magnitudeSquared Sets the magnitudeSquared option. + * + * @param magnitudeSquared Whether to return the squared magnitude or just the + * magnitude. Using squared magnitude can avoid extra calculations. + * @return this Options instance. 
+ */ + public fun audioSpectrogram( + input: Operand, + windowSize: Long, + stride: Long, + magnitudeSquared: Boolean? = null + ): AudioSpectrogram = java.audioSpectrogram( + input, + windowSize, + stride, + *listOfNotNull( + magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + ).toTypedArray() + ) + + /** + * Decode a 16-bit PCM WAV file to a float tensor. + * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. + * + * When desired_channels is set, if the input contains fewer channels than this + * then the last channel will be duplicated to give the requested number, else if + * the input has more channels than requested then the additional channels will be + * ignored. + * + * If desired_samples is set, then the audio will be cropped or padded with zeroes + * to the requested length. + * + * The first output contains a Tensor with the content of the audio samples. The + * lowest dimension will be the number of channels, and the second will be the + * number of samples. For example, a ten-sample-long stereo WAV file should give an + * output shape of [10, 2]. + * + * @param contents The WAV-encoded audio, usually from a file. + * @param options carries optional attribute values + * @return a new instance of DecodeWav + * @see org.tensorflow.op.AudioOps.decodeWav + * @param desiredChannels Sets the desiredChannels option. + * + * @param desiredChannels Number of sample channels wanted. + * @return this Options instance. + * @param desiredSamples Sets the desiredSamples option. + * + * @param desiredSamples Length of audio requested. + * @return this Options instance. + */ + public fun decodeWav( + contents: Operand, + desiredChannels: Long? = null, + desiredSamples: Long? 
= null + ): DecodeWav = java.decodeWav( + contents, + *listOfNotNull( + desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + ).toTypedArray() + ) + + /** + * Encode audio data using the WAV file format. + * This operation will generate a string suitable to be saved out to create a .wav + * audio file. It will be encoded in the 16-bit PCM format. It takes in float + * values in the range -1.0f to 1.0f, and any outside that value will be clamped to + * that range. + * + * `audio` is a 2-D float Tensor of shape `[length, channels]`. + * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). + * + * @param audio 2-D with shape `[length, channels]`. + * @param sampleRate Scalar containing the sample frequency. + * @return a new instance of EncodeWav + * @see org.tensorflow.op.AudioOps.encodeWav + */ + public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = + java.encodeWav( + audio, + sampleRate + ) + + /** + * Transforms a spectrogram into a form that's useful for speech recognition. + * Mel Frequency Cepstral Coefficients are a way of representing audio data that's + * been effective as an input feature for machine learning. They are created by + * taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the + * higher frequencies that are less significant to the human ear. They have a long + * history in the speech recognition world, and + * https://en.wikipedia.org/wiki/Mel-frequency_cepstrum + * is a good resource to learn more. + * + * @param spectrogram Typically produced by the Spectrogram op, with magnitude_squared + * set to true. + * @param sampleRate How many samples per second the source audio used. + * @param options carries optional attribute values + * @return a new instance of Mfcc + * @see org.tensorflow.op.AudioOps.mfcc + * @param upperFrequencyLimit Sets the upperFrequencyLimit option. 
+ * + * @param upperFrequencyLimit The highest frequency to use when calculating the + * ceptstrum. + * @return this Options instance. + * @param lowerFrequencyLimit Sets the lowerFrequencyLimit option. + * + * @param lowerFrequencyLimit The lowest frequency to use when calculating the + * ceptstrum. + * @return this Options instance. + * @param filterbankChannelCount Sets the filterbankChannelCount option. + * + * @param filterbankChannelCount Resolution of the Mel bank used internally. + * @return this Options instance. + * @param dctCoefficientCount Sets the dctCoefficientCount option. + * + * @param dctCoefficientCount How many output channels to produce per time slice. + * @return this Options instance. + */ + public fun mfcc( + spectrogram: Operand, + sampleRate: Operand, + upperFrequencyLimit: Float? = null, + lowerFrequencyLimit: Float? = null, + filterbankChannelCount: Long? = null, + dctCoefficientCount: Long? = null + ): Mfcc = java.mfcc( + spectrogram, + sampleRate, + *listOfNotNull( + upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + ).toTypedArray() + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt new file mode 100644 index 00000000000..2ad2d734c3f --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -0,0 +1,302 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.bitwise.BitwiseAnd +import org.tensorflow.op.bitwise.BitwiseOr +import org.tensorflow.op.bitwise.BitwiseXor +import org.tensorflow.op.bitwise.Invert +import org.tensorflow.op.bitwise.LeftShift +import org.tensorflow.op.bitwise.RightShift +import org.tensorflow.types.family.TNumber + +/** + * An API for building `bitwise` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class BitwiseOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Elementwise computes the bitwise AND of `x` and `y`. + * The result will have those bits set, that are set in both `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. 
+ * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_and(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `BitwiseAnd` output and operands + * @return a new instance of BitwiseAnd + * @see org.tensorflow.op.BitwiseOps.bitwiseAnd + */ + public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = + java.bitwiseAnd( + x, + y + ) + + /** + * Elementwise computes the bitwise OR of `x` and `y`. + * The result will have those bits set, that are set in `x`, `y` or both. The + * computation is performed on the underlying representations of `x` and `y`. + * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_or(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `BitwiseOr` output and operands + * @return a new instance of BitwiseOr + * @see org.tensorflow.op.BitwiseOps.bitwiseOr + */ + public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = + java.bitwiseOr( + x, + y + ) + + /** + * Elementwise computes the bitwise XOR of `x` and `y`. 
+ * The result will have those bits set, that are different in `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. + * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_xor(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `BitwiseXor` output and operands + * @return a new instance of BitwiseXor + * @see org.tensorflow.op.BitwiseOps.bitwiseXor + */ + public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = + java.bitwiseXor( + x, + y + ) + + /** + * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes + * 10101010. + * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 + * becomes (decimal -3) binary 11111101. + * This operation is performed on each element of the tensor argument `x`. + * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * + * # flip 2 (00000010) to -3 (11111101) + * tf.assert_equal(-3, bitwise_ops.invert(2)) + * + * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] + * + * inputs = [0, 5, 3, 14] + * for dtype in dtype_list: + * # Because of issues with negative numbers, let's test this indirectly. + * # 1. invert(a) and a = 0 + * # 2. 
invert(a) or a = invert(0) + * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + * input_tensor, bitwise_ops.invert(input_tensor)), + * bitwise_ops.bitwise_or( + * input_tensor, bitwise_ops.invert(input_tensor)), + * bitwise_ops.invert( + * tf.constant(0, dtype=dtype))] + * + * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) + * + * expected = tf.cast([not_0] * 4, tf.float32) + * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) + * + * # For unsigned dtypes let's also check the result directly. + * if dtype.is_unsigned: + * inverted = bitwise_ops.invert(input_tensor) + * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Invert` output and operands + * @return a new instance of Invert + * @see org.tensorflow.op.BitwiseOps.invert + */ + public fun invert(x: Operand): Invert = java.invert( + x + ) + + /** + * Elementwise computes the bitwise left-shift of `x` and `y`. + * If `y` is negative, or greater than or equal to the width of `x` in bits the + * result is implementation defined. 
+ * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * import numpy as np + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * + * for dtype in dtype_list: + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * + * left_shift_result = bitwise_ops.left_shift(lhs, rhs) + * + * print(left_shift_result) + * + * # This will print: + * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + * + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * bitwise_ops.left_shift(lhs, rhs) + * # + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `LeftShift` output and operands + * @return a new instance of LeftShift + * @see org.tensorflow.op.BitwiseOps.leftShift + */ + public fun leftShift(x: Operand, y: Operand): LeftShift = + java.leftShift( + x, + y + ) + + /** + * Elementwise computes the bitwise right-shift of `x` and `y`. + * Performs a logical shift for unsigned integer types, and an arithmetic shift + * for signed integer types. + * + * If `y` is negative, or greater than or equal to than the width of `x` in bits + * the result is implementation defined. 
+ * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * import numpy as np + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * + * for dtype in dtype_list: + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * + * right_shift_result = bitwise_ops.right_shift(lhs, rhs) + * + * print(right_shift_result) + * + * # This will print: + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) + * + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * bitwise_ops.right_shift(lhs, rhs) + * # + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `RightShift` output and operands + * @return a new instance of RightShift + * @see org.tensorflow.op.BitwiseOps.rightShift + */ + public fun rightShift(x: Operand, y: Operand): RightShift = + java.rightShift( + x, + y + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt new file mode 100644 index 00000000000..6c911cfe33a --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -0,0 +1,3341 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import org.tensorflow.ConcreteFunction +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.`data`.AnonymousIterator +import org.tensorflow.op.`data`.AssertCardinalityDataset +import org.tensorflow.op.`data`.AssertNextDataset +import org.tensorflow.op.`data`.AutoShardDataset +import org.tensorflow.op.`data`.BatchDataset +import org.tensorflow.op.`data`.BytesProducedStatsDataset +import org.tensorflow.op.`data`.CSVDataset +import org.tensorflow.op.`data`.CacheDataset +import org.tensorflow.op.`data`.ChooseFastestBranchDataset +import org.tensorflow.op.`data`.ChooseFastestDataset +import org.tensorflow.op.`data`.ConcatenateDataset +import org.tensorflow.op.`data`.DataServiceDatasetV2 +import org.tensorflow.op.`data`.DatasetCardinality +import org.tensorflow.op.`data`.DatasetFromGraph +import org.tensorflow.op.`data`.DatasetToGraph +import org.tensorflow.op.`data`.DatasetToSingleElement +import org.tensorflow.op.`data`.DatasetToTfRecord +import org.tensorflow.op.`data`.DeleteIterator +import org.tensorflow.op.`data`.DenseToSparseBatchDataset +import org.tensorflow.op.`data`.DeserializeIterator +import org.tensorflow.op.`data`.DirectedInterleaveDataset +import org.tensorflow.op.`data`.FilterByLastComponentDataset +import 
org.tensorflow.op.`data`.FilterDataset +import org.tensorflow.op.`data`.FinalizeDataset +import org.tensorflow.op.`data`.FixedLengthRecordDataset +import org.tensorflow.op.`data`.FlatMapDataset +import org.tensorflow.op.`data`.GeneratorDataset +import org.tensorflow.op.`data`.GroupByReducerDataset +import org.tensorflow.op.`data`.GroupByWindowDataset +import org.tensorflow.op.`data`.IgnoreErrorsDataset +import org.tensorflow.op.`data`.InitializeTableFromDataset +import org.tensorflow.op.`data`.InterleaveDataset +import org.tensorflow.op.`data`.Iterator +import org.tensorflow.op.`data`.IteratorGetNext +import org.tensorflow.op.`data`.IteratorGetNextAsOptional +import org.tensorflow.op.`data`.IteratorGetNextSync +import org.tensorflow.op.`data`.IteratorToStringHandle +import org.tensorflow.op.`data`.LMDBDataset +import org.tensorflow.op.`data`.LatencyStatsDataset +import org.tensorflow.op.`data`.LegacyParallelInterleaveDataset +import org.tensorflow.op.`data`.LoadDataset +import org.tensorflow.op.`data`.MakeIterator +import org.tensorflow.op.`data`.MapAndBatchDataset +import org.tensorflow.op.`data`.MapDataset +import org.tensorflow.op.`data`.MatchingFilesDataset +import org.tensorflow.op.`data`.MaxIntraOpParallelismDataset +import org.tensorflow.op.`data`.ModelDataset +import org.tensorflow.op.`data`.NonSerializableDataset +import org.tensorflow.op.`data`.OneShotIterator +import org.tensorflow.op.`data`.OptimizeDataset +import org.tensorflow.op.`data`.OptionalFromValue +import org.tensorflow.op.`data`.OptionalGetValue +import org.tensorflow.op.`data`.OptionalHasValue +import org.tensorflow.op.`data`.OptionalNone +import org.tensorflow.op.`data`.OptionsDataset +import org.tensorflow.op.`data`.PaddedBatchDataset +import org.tensorflow.op.`data`.ParallelBatchDataset +import org.tensorflow.op.`data`.ParallelInterleaveDataset +import org.tensorflow.op.`data`.ParallelMapDataset +import org.tensorflow.op.`data`.ParseExampleDataset +import 
org.tensorflow.op.`data`.PrefetchDataset +import org.tensorflow.op.`data`.PrivateThreadPoolDataset +import org.tensorflow.op.`data`.RandomDataset +import org.tensorflow.op.`data`.RangeDataset +import org.tensorflow.op.`data`.RebatchDatasetV2 +import org.tensorflow.op.`data`.ReduceDataset +import org.tensorflow.op.`data`.RegisterDataset +import org.tensorflow.op.`data`.RepeatDataset +import org.tensorflow.op.`data`.SamplingDataset +import org.tensorflow.op.`data`.SaveDataset +import org.tensorflow.op.`data`.ScanDataset +import org.tensorflow.op.`data`.SerializeIterator +import org.tensorflow.op.`data`.SetStatsAggregatorDataset +import org.tensorflow.op.`data`.ShardDataset +import org.tensorflow.op.`data`.ShuffleAndRepeatDataset +import org.tensorflow.op.`data`.ShuffleDataset +import org.tensorflow.op.`data`.SkipDataset +import org.tensorflow.op.`data`.SleepDataset +import org.tensorflow.op.`data`.SlidingWindowDataset +import org.tensorflow.op.`data`.SnapshotDataset +import org.tensorflow.op.`data`.SparseTensorSliceDataset +import org.tensorflow.op.`data`.SqlDataset +import org.tensorflow.op.`data`.TakeDataset +import org.tensorflow.op.`data`.TakeWhileDataset +import org.tensorflow.op.`data`.TensorDataset +import org.tensorflow.op.`data`.TensorSliceDataset +import org.tensorflow.op.`data`.TextLineDataset +import org.tensorflow.op.`data`.TfRecordDataset +import org.tensorflow.op.`data`.ThreadPoolDataset +import org.tensorflow.op.`data`.UnbatchDataset +import org.tensorflow.op.`data`.UniqueDataset +import org.tensorflow.op.`data`.UnwrapDatasetVariant +import org.tensorflow.op.`data`.WindowDataset +import org.tensorflow.op.`data`.WrapDatasetVariant +import org.tensorflow.op.`data`.ZipDataset +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `data` operations as 
[Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class DataOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.DataOps = ops.java.data + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * A container for an iterator resource. + * + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AnonymousIterator + * @see org.tensorflow.op.DataOps.anonymousIterator + */ + public fun anonymousIterator(outputTypes: List>, outputShapes: List): + AnonymousIterator = java.anonymousIterator( + outputTypes, + outputShapes + ) + + /** + * The AssertCardinalityDataset operation + * + * @param inputDataset The inputDataset value + * @param cardinality The cardinality value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertCardinalityDataset + * @see org.tensorflow.op.DataOps.assertCardinalityDataset + */ + public fun assertCardinalityDataset( + inputDataset: Operand, + cardinality: Operand, + outputTypes: List>, + outputShapes: List + ): AssertCardinalityDataset = java.assertCardinalityDataset( + inputDataset, + cardinality, + outputTypes, + outputShapes + ) + + /** + * A transformation that asserts which transformations happen next. + * This transformation checks whether the camel-case names (i.e. "FlatMap", not + * "flat_map") of the transformations following this transformation match the list + * of names in the `transformations` argument. If there is a mismatch, the + * transformation raises an exception. + * + * The check occurs when iterating over the contents of the dataset, which + * means that the check happens _after_ any static optimizations are applied + * to the dataset graph. 
+ * + * @param inputDataset A variant tensor representing the input dataset. + * `data.AssertNextDataset` passes through the outputs of its input dataset. + * @param transformations A `tf.string` vector `tf.Tensor` identifying the transformations that + * are + * expected to happen next. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertNextDataset + * @see org.tensorflow.op.DataOps.assertNextDataset + */ + public fun assertNextDataset( + inputDataset: Operand, + transformations: Operand, + outputTypes: List>, + outputShapes: List + ): AssertNextDataset = java.assertNextDataset( + inputDataset, + transformations, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that shards the input dataset. + * Creates a dataset that shards the input dataset by num_workers, returning a + * sharded dataset for the index-th worker. This attempts to automatically shard + * a dataset by examining the Dataset graph and inserting a shard op before the + * inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + * + * This dataset will throw a NotFound error if we cannot shard the dataset + * automatically. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param numWorkers A scalar representing the number of workers to distribute this dataset + * across. + * @param index A scalar representing the index of the current worker out of num_workers. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of AutoShardDataset + * @see org.tensorflow.op.DataOps.autoShardDataset + * @param autoShardPolicy Sets the autoShardPolicy option. + * + * @param autoShardPolicy the autoShardPolicy option + * @return this Options instance. + * @param numReplicas Sets the numReplicas option. 
+ * + * @param numReplicas the numReplicas option + * @return this Options instance. + */ + public fun autoShardDataset( + inputDataset: Operand, + numWorkers: Operand, + index: Operand, + outputTypes: List>, + outputShapes: List, + autoShardPolicy: Long? = null, + numReplicas: Long? = null + ): AutoShardDataset = java.autoShardDataset( + inputDataset, + numWorkers, + index, + outputTypes, + outputShapes, + *listOfNotNull( + autoShardPolicy?.let{ org.tensorflow.op.data.AutoShardDataset.autoShardPolicy(it) }, + numReplicas?.let{ org.tensorflow.op.data.AutoShardDataset.numReplicas(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that batches `batch_size` elements from `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param batchSize A scalar representing the number of elements to accumulate in a batch. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case + * its size + * is smaller than desired. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of BatchDataset + * @see org.tensorflow.op.DataOps.batchDataset + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun batchDataset( + inputDataset: Operand, + batchSize: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + parallelCopy: Boolean? = null, + metadata: String? 
= null + ): BatchDataset = java.batchDataset( + inputDataset, + batchSize, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) }, + metadata?.let{ org.tensorflow.op.data.BatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Records the bytes size of each element of `input_dataset` in a StatsAggregator. + * + * @param inputDataset The inputDataset value + * @param tag The tag value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of BytesProducedStatsDataset + * @see org.tensorflow.op.DataOps.bytesProducedStatsDataset + */ + public fun bytesProducedStatsDataset( + inputDataset: Operand, + tag: Operand, + outputTypes: List>, + outputShapes: List + ): BytesProducedStatsDataset = java.bytesProducedStatsDataset( + inputDataset, + tag, + outputTypes, + outputShapes + ) + + /** + * The CSVDatasetV2 operation + * + * @param filenames The filenames value + * @param compressionType The compressionType value + * @param bufferSize The bufferSize value + * @param header The header value + * @param fieldDelim The fieldDelim value + * @param useQuoteDelim The useQuoteDelim value + * @param naValue The naValue value + * @param selectCols The selectCols value + * @param recordDefaults The recordDefaults value + * @param excludeCols The excludeCols value + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of CSVDataset + * @see org.tensorflow.op.DataOps.cSVDataset + */ + public fun cSVDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + header: Operand, + fieldDelim: Operand, + useQuoteDelim: Operand, + naValue: Operand, + selectCols: Operand, + recordDefaults: Iterable>, + excludeCols: Operand, + outputShapes: List + ): CSVDataset = java.cSVDataset( + filenames, + compressionType, + bufferSize, + header, + 
fieldDelim, + useQuoteDelim, + naValue, + selectCols, + recordDefaults, + excludeCols, + outputShapes + ) + + /** + * The CacheDatasetV2 operation + * + * @param inputDataset The inputDataset value + * @param filename The filename value + * @param cache The cache value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of CacheDataset + * @see org.tensorflow.op.DataOps.cacheDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun cacheDataset( + inputDataset: Operand, + filename: Operand, + cache: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): CacheDataset = java.cacheDataset( + inputDataset, + filename, + cache, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.CacheDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ChooseFastestBranchDataset operation + * + * @param inputDataset The inputDataset value + * @param ratioNumerator The ratioNumerator value + * @param ratioDenominator The ratioDenominator value + * @param otherArguments The otherArguments value + * @param numElementsPerBranch The value of the numElementsPerBranch attribute + * @param branches The value of the branches attribute + * @param otherArgumentsLengths The value of the otherArgumentsLengths attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ChooseFastestBranchDataset + * @see org.tensorflow.op.DataOps.chooseFastestBranchDataset + */ + public fun chooseFastestBranchDataset( + inputDataset: Operand, + ratioNumerator: Operand, + ratioDenominator: Operand, + otherArguments: Iterable>, + numElementsPerBranch: Long, + branches: List, + 
otherArgumentsLengths: List, + outputTypes: List>, + outputShapes: List + ): ChooseFastestBranchDataset = java.chooseFastestBranchDataset( + inputDataset, + ratioNumerator, + ratioDenominator, + otherArguments, + numElementsPerBranch, + branches, + otherArgumentsLengths, + outputTypes, + outputShapes + ) + + /** + * The ChooseFastestDataset operation + * + * @param inputDatasets The inputDatasets value + * @param numExperiments The value of the numExperiments attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ChooseFastestDataset + * @see org.tensorflow.op.DataOps.chooseFastestDataset + */ + public fun chooseFastestDataset( + inputDatasets: Iterable>, + numExperiments: Long, + outputTypes: List>, + outputShapes: List + ): ChooseFastestDataset = java.chooseFastestDataset( + inputDatasets, + numExperiments, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that concatenates `input_dataset` with `another_dataset`. + * + * @param inputDataset The inputDataset value + * @param anotherDataset The anotherDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ConcatenateDataset + * @see org.tensorflow.op.DataOps.concatenateDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun concatenateDataset( + inputDataset: Operand, + anotherDataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): ConcatenateDataset = java.concatenateDataset( + inputDataset, + anotherDataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.ConcatenateDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that reads data from the tf.data service. + * + * @param datasetId The datasetId value + * @param processingMode The processingMode value + * @param address The address value + * @param protocol The protocol value + * @param jobName The jobName value + * @param consumerIndex The consumerIndex value + * @param numConsumers The numConsumers value + * @param maxOutstandingRequests The maxOutstandingRequests value + * @param iterationCounter The iterationCounter value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of DataServiceDatasetV2 + * @see org.tensorflow.op.DataOps.dataServiceDatasetV2 + * @param taskRefreshIntervalHintMs Sets the taskRefreshIntervalHintMs option. + * + * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option + * @return this Options instance. + * @param dataTransferProtocol Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + * @param targetWorkers Sets the targetWorkers option. + * + * @param targetWorkers the targetWorkers option + * @return this Options instance. + */ + public fun dataServiceDatasetV2( + datasetId: Operand, + processingMode: Operand, + address: Operand, + protocol: Operand, + jobName: Operand, + consumerIndex: Operand, + numConsumers: Operand, + maxOutstandingRequests: Operand, + iterationCounter: Operand, + outputTypes: List>, + outputShapes: List, + taskRefreshIntervalHintMs: Long? = null, + dataTransferProtocol: String? = null, + targetWorkers: String? 
= null + ): DataServiceDatasetV2 = java.dataServiceDatasetV2( + datasetId, + processingMode, + address, + protocol, + jobName, + consumerIndex, + numConsumers, + maxOutstandingRequests, + iterationCounter, + outputTypes, + outputShapes, + *listOfNotNull( + taskRefreshIntervalHintMs?.let{ + org.tensorflow.op.data.DataServiceDatasetV2.taskRefreshIntervalHintMs(it) }, + dataTransferProtocol?.let{ + org.tensorflow.op.data.DataServiceDatasetV2.dataTransferProtocol(it) }, + targetWorkers?.let{ org.tensorflow.op.data.DataServiceDatasetV2.targetWorkers(it) } + ).toTypedArray() + ) + + /** + * Returns the cardinality of `input_dataset`. + * Returns the cardinality of `input_dataset`. + * + * @param inputDataset A variant tensor representing the dataset to return cardinality for. + * @return a new instance of DatasetCardinality + * @see org.tensorflow.op.DataOps.datasetCardinality + */ + public fun datasetCardinality(inputDataset: Operand): DatasetCardinality = + java.datasetCardinality( + inputDataset + ) + + /** + * Creates a dataset from the given `graph_def`. + * Creates a dataset from the provided `graph_def`. + * + * @param graphDef The graph representation of the dataset (as serialized GraphDef). + * @return a new instance of DatasetFromGraph + * @see org.tensorflow.op.DataOps.datasetFromGraph + */ + public fun datasetFromGraph(graphDef: Operand): DatasetFromGraph = + java.datasetFromGraph( + graphDef + ) + + /** + * Returns a serialized GraphDef representing `input_dataset`. + * Returns a graph representation for `input_dataset`. + * + * @param inputDataset A variant tensor representing the dataset to return the graph + * representation for. + * @param options carries optional attribute values + * @return a new instance of DatasetToGraph + * @see org.tensorflow.op.DataOps.datasetToGraph + * @param externalStatePolicy Sets the externalStatePolicy option. + * + * @param externalStatePolicy the externalStatePolicy option + * @return this Options instance. 
+ * @param stripDeviceAssignment Sets the stripDeviceAssignment option. + * + * @param stripDeviceAssignment the stripDeviceAssignment option + * @return this Options instance. + */ + public fun datasetToGraph( + inputDataset: Operand, + externalStatePolicy: Long? = null, + stripDeviceAssignment: Boolean? = null + ): DatasetToGraph = java.datasetToGraph( + inputDataset, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.DatasetToGraph.externalStatePolicy(it) }, + stripDeviceAssignment?.let{ org.tensorflow.op.data.DatasetToGraph.stripDeviceAssignment(it) } + ).toTypedArray() + ) + + /** + * Outputs the single element from the given dataset. + * + * @param dataset A handle to a dataset that contains a single element. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of DatasetToSingleElement + * @see org.tensorflow.op.DataOps.datasetToSingleElement + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun datasetToSingleElement( + dataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): DatasetToSingleElement = java.datasetToSingleElement( + dataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.DatasetToSingleElement.metadata(it) } + ).toTypedArray() + ) + + /** + * Writes the given dataset to the given file using the TFRecord format. + * + * @param inputDataset A variant tensor representing the dataset to write. + * @param filename A scalar string tensor representing the filename to use. + * @param compressionType A scalar string tensor containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". 
* @return a new instance of DatasetToTfRecord
   * @see org.tensorflow.op.DataOps.datasetToTfRecord
   */
  public fun datasetToTfRecord(
    inputDataset: Operand<out TType>,
    filename: Operand<TString>,
    compressionType: Operand<TString>
  ): DatasetToTfRecord = java.datasetToTfRecord(inputDataset, filename, compressionType)

  /**
   * A container for an iterator resource.
   *
   * @param handle A handle to the iterator to delete.
   * @param deleter A variant deleter.
   * @return a new instance of DeleteIterator
   * @see org.tensorflow.op.DataOps.deleteIterator
   */
  public fun deleteIterator(
    handle: Operand<out TType>,
    deleter: Operand<out TType>
  ): DeleteIterator = java.deleteIterator(handle, deleter)

  /**
   * Creates a dataset that batches input elements into a SparseTensor.
   *
   * @param inputDataset A handle to an input dataset. Must have a single component.
   * @param batchSize A scalar representing the number of elements to accumulate in a batch.
   * @param rowShape A vector representing the dense shape of each row in the produced
   *     SparseTensor. The shape may be partially specified, using `-1` to indicate that a
   *     particular dimension should use the maximum size of all batch elements.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of DenseToSparseBatchDataset
   * @see org.tensorflow.op.DataOps.denseToSparseBatchDataset
   */
  public fun denseToSparseBatchDataset(
    inputDataset: Operand<out TType>,
    batchSize: Operand<TInt64>,
    rowShape: Operand<TInt64>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): DenseToSparseBatchDataset =
    java.denseToSparseBatchDataset(inputDataset, batchSize, rowShape, outputTypes, outputShapes)

  /**
   * Converts the given variant tensor to an iterator and stores it in the given resource.
   *
   * @param resourceHandle A handle to an iterator resource.
   * @param serialized A variant tensor storing the state of the iterator contained in the
   *     resource.
   * @return a new instance of DeserializeIterator
   * @see org.tensorflow.op.DataOps.deserializeIterator
   */
  public fun deserializeIterator(
    resourceHandle: Operand<out TType>,
    serialized: Operand<out TType>
  ): DeserializeIterator = java.deserializeIterator(resourceHandle, serialized)

  /**
   * A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
   *
   * @param selectorInputDataset A dataset of scalar `DT_INT64` elements that determines which of
   *     the `N` data inputs should produce the next output element.
   * @param dataInputDatasets `N` datasets with the same type that will be interleaved according
   *     to the values of `selector_input_dataset`.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param stopOnEmptyDataset the stopOnEmptyDataset option
   * @return a new instance of DirectedInterleaveDataset
   * @see org.tensorflow.op.DataOps.directedInterleaveDataset
   */
  public fun directedInterleaveDataset(
    selectorInputDataset: Operand<out TType>,
    dataInputDatasets: Iterable<Operand<out TType>>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    stopOnEmptyDataset: Boolean? = null
  ): DirectedInterleaveDataset =
    java.directedInterleaveDataset(
      selectorInputDataset,
      dataInputDatasets,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        stopOnEmptyDataset?.let {
          org.tensorflow.op.data.DirectedInterleaveDataset.stopOnEmptyDataset(it)
        }
      ).toTypedArray()
    )

  /**
   * Creates a dataset containing elements of first component of `input_dataset` having true in
   * the last component.
*
   * @param inputDataset The inputDataset value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of FilterByLastComponentDataset
   * @see org.tensorflow.op.DataOps.filterByLastComponentDataset
   */
  public fun filterByLastComponentDataset(
    inputDataset: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): FilterByLastComponentDataset =
    java.filterByLastComponentDataset(inputDataset, outputTypes, outputShapes)

  /**
   * Creates a dataset containing elements of `input_dataset` matching `predicate`.
   *
   * The `predicate` function must return a scalar boolean and accept the following arguments:
   * <ul>
   * <li>One tensor for each component of an element of `input_dataset`.</li>
   * <li>One tensor for each value in `other_arguments`.</li>
   * </ul>
   *
   * @param inputDataset The inputDataset value
   * @param otherArguments A list of tensors, typically values that were captured when building a
   *     closure for `predicate`.
   * @param predicate A function returning a scalar boolean.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param metadata the metadata option
   * @return a new instance of FilterDataset
   * @see org.tensorflow.op.DataOps.filterDataset
   */
  public fun filterDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    predicate: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    metadata: String? = null
  ): FilterDataset =
    java.filterDataset(
      inputDataset,
      otherArguments,
      predicate,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.FilterDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset by applying `tf.data.Options` to `input_dataset`.
   *
   * @param inputDataset A variant tensor representing the input dataset.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param hasCapturedRef the hasCapturedRef option
   * @return a new instance of FinalizeDataset
   * @see org.tensorflow.op.DataOps.finalizeDataset
   */
  public fun finalizeDataset(
    inputDataset: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    hasCapturedRef: Boolean?
= null
  ): FinalizeDataset =
    java.finalizeDataset(
      inputDataset,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        hasCapturedRef?.let { org.tensorflow.op.data.FinalizeDataset.hasCapturedRef(it) }
      ).toTypedArray()
    )

  /**
   * The FixedLengthRecordDatasetV2 operation
   *
   * @param filenames The filenames value
   * @param headerBytes The headerBytes value
   * @param recordBytes The recordBytes value
   * @param footerBytes The footerBytes value
   * @param bufferSize The bufferSize value
   * @param compressionType The compressionType value
   * @param metadata the metadata option
   * @return a new instance of FixedLengthRecordDataset
   * @see org.tensorflow.op.DataOps.fixedLengthRecordDataset
   */
  public fun fixedLengthRecordDataset(
    filenames: Operand<TString>,
    headerBytes: Operand<TInt64>,
    recordBytes: Operand<TInt64>,
    footerBytes: Operand<TInt64>,
    bufferSize: Operand<TInt64>,
    compressionType: Operand<TString>,
    metadata: String? = null
  ): FixedLengthRecordDataset =
    java.fixedLengthRecordDataset(
      filenames,
      headerBytes,
      recordBytes,
      footerBytes,
      bufferSize,
      compressionType,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.FixedLengthRecordDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset that applies `f` to the outputs of `input_dataset`.
   *
   * Unlike MapDataset, the `f` in FlatMapDataset is expected to return a Dataset variant, and
   * FlatMapDataset will flatten successive results into a single Dataset.
   *
   * @param inputDataset The inputDataset value
   * @param otherArguments The otherArguments value
   * @param f A function mapping elements of `input_dataset`, concatenated with
   *     `other_arguments`, to a Dataset variant that contains elements matching `output_types`
   *     and `output_shapes`.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param metadata the metadata option
   * @return a new instance of FlatMapDataset
   * @see org.tensorflow.op.DataOps.flatMapDataset
   */
  public fun flatMapDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    f: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    metadata: String? = null
  ): FlatMapDataset =
    java.flatMapDataset(
      inputDataset,
      otherArguments,
      f,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.FlatMapDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset that invokes a function to generate elements.
   *
   * @param initFuncOtherArgs The initFuncOtherArgs value
   * @param nextFuncOtherArgs The nextFuncOtherArgs value
   * @param finalizeFuncOtherArgs The finalizeFuncOtherArgs value
   * @param initFunc The value of the initFunc attribute
   * @param nextFunc The value of the nextFunc attribute
   * @param finalizeFunc The value of the finalizeFunc attribute
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param metadata the metadata option
   * @return a new instance of GeneratorDataset
   * @see org.tensorflow.op.DataOps.generatorDataset
   */
  public fun generatorDataset(
    initFuncOtherArgs: Iterable<Operand<*>>,
    nextFuncOtherArgs: Iterable<Operand<*>>,
    finalizeFuncOtherArgs: Iterable<Operand<*>>,
    initFunc: ConcreteFunction,
    nextFunc: ConcreteFunction,
    finalizeFunc: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    metadata: String? = null
  ): GeneratorDataset =
    java.generatorDataset(
      initFuncOtherArgs,
      nextFuncOtherArgs,
      finalizeFuncOtherArgs,
      initFunc,
      nextFunc,
      finalizeFunc,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.GeneratorDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset that computes a group-by on `input_dataset`.
   *
   * @param inputDataset A variant tensor representing the input dataset.
   * @param keyFuncOtherArguments A list of tensors, typically values that were captured when
   *     building a closure for `key_func`.
   * @param initFuncOtherArguments A list of tensors, typically values that were captured when
   *     building a closure for `init_func`.
   * @param reduceFuncOtherArguments A list of tensors, typically values that were captured when
   *     building a closure for `reduce_func`.
   * @param finalizeFuncOtherArguments A list of tensors, typically values that were captured
   *     when building a closure for `finalize_func`.
   * @param keyFunc A function mapping an element of `input_dataset`, concatenated with
   *     `key_func_other_arguments` to a scalar value of type DT_INT64.
   * @param initFunc A function mapping a key of type DT_INT64, concatenated with
   *     `init_func_other_arguments` to the initial reducer state.
   * @param reduceFunc A function mapping the current reducer state and an element of
   *     `input_dataset`, concatenated with `reduce_func_other_arguments` to a new reducer state.
   * @param finalizeFunc A function mapping the final reducer state to an output element.
* @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of GroupByReducerDataset
   * @see org.tensorflow.op.DataOps.groupByReducerDataset
   */
  public fun groupByReducerDataset(
    inputDataset: Operand<out TType>,
    keyFuncOtherArguments: Iterable<Operand<*>>,
    initFuncOtherArguments: Iterable<Operand<*>>,
    reduceFuncOtherArguments: Iterable<Operand<*>>,
    finalizeFuncOtherArguments: Iterable<Operand<*>>,
    keyFunc: ConcreteFunction,
    initFunc: ConcreteFunction,
    reduceFunc: ConcreteFunction,
    finalizeFunc: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): GroupByReducerDataset =
    java.groupByReducerDataset(
      inputDataset,
      keyFuncOtherArguments,
      initFuncOtherArguments,
      reduceFuncOtherArguments,
      finalizeFuncOtherArguments,
      keyFunc,
      initFunc,
      reduceFunc,
      finalizeFunc,
      outputTypes,
      outputShapes
    )

  /**
   * Creates a dataset that computes a windowed group-by on `input_dataset`.
   *
   * // TODO(mrry): Support non-int64 keys.
   *
   * @param inputDataset The inputDataset value
   * @param keyFuncOtherArguments The keyFuncOtherArguments value
   * @param reduceFuncOtherArguments The reduceFuncOtherArguments value
   * @param windowSizeFuncOtherArguments The windowSizeFuncOtherArguments value
   * @param keyFunc A function mapping an element of `input_dataset`, concatenated with
   *     `key_func_other_arguments` to a scalar value of type DT_INT64.
   * @param reduceFunc The value of the reduceFunc attribute
   * @param windowSizeFunc The value of the windowSizeFunc attribute
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param metadata the metadata option
   * @return a new instance of GroupByWindowDataset
   * @see org.tensorflow.op.DataOps.groupByWindowDataset
   */
  public fun groupByWindowDataset(
    inputDataset: Operand<out TType>,
    keyFuncOtherArguments: Iterable<Operand<*>>,
    reduceFuncOtherArguments: Iterable<Operand<*>>,
    windowSizeFuncOtherArguments: Iterable<Operand<*>>,
    keyFunc: ConcreteFunction,
    reduceFunc: ConcreteFunction,
    windowSizeFunc: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    metadata: String? = null
  ): GroupByWindowDataset =
    java.groupByWindowDataset(
      inputDataset,
      keyFuncOtherArguments,
      reduceFuncOtherArguments,
      windowSizeFuncOtherArguments,
      keyFunc,
      reduceFunc,
      windowSizeFunc,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.GroupByWindowDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset that contains the elements of `input_dataset` ignoring errors.
   *
   * @param inputDataset The inputDataset value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param logWarning the logWarning option
   * @return a new instance of IgnoreErrorsDataset
   * @see org.tensorflow.op.DataOps.ignoreErrorsDataset
   */
  public fun ignoreErrorsDataset(
    inputDataset: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    logWarning: Boolean? = null
  ): IgnoreErrorsDataset =
    java.ignoreErrorsDataset(
      inputDataset,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        logWarning?.let { org.tensorflow.op.data.IgnoreErrorsDataset.logWarning(it) }
      ).toTypedArray()
    )

  /**
   * The InitializeTableFromDataset operation
   *
   * @param tableHandle The tableHandle value
   * @param dataset The dataset value
   * @return a new instance of InitializeTableFromDataset
   * @see org.tensorflow.op.DataOps.initializeTableFromDataset
   */
  public fun initializeTableFromDataset(
    tableHandle: Operand<out TType>,
    dataset: Operand<out TType>
  ): InitializeTableFromDataset = java.initializeTableFromDataset(tableHandle, dataset)

  /**
   * Creates a dataset that applies `f` to the outputs of `input_dataset`.
   *
   * Unlike MapDataset, the `f` in InterleaveDataset is expected to return a Dataset variant, and
   * InterleaveDataset will flatten successive results into a single Dataset. Unlike
   * FlatMapDataset, InterleaveDataset will interleave sequences of up to `block_length`
   * consecutive elements from `cycle_length` input elements.
   *
   * @param inputDataset The inputDataset value
   * @param otherArguments The otherArguments value
   * @param cycleLength The cycleLength value
   * @param blockLength The blockLength value
   * @param f A function mapping elements of `input_dataset`, concatenated with
   *     `other_arguments`, to a Dataset variant that contains elements matching `output_types`
   *     and `output_shapes`.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param metadata the metadata option
   * @return a new instance of InterleaveDataset
   * @see org.tensorflow.op.DataOps.interleaveDataset
*/
  public fun interleaveDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    cycleLength: Operand<TInt64>,
    blockLength: Operand<TInt64>,
    f: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    metadata: String? = null
  ): InterleaveDataset =
    java.interleaveDataset(
      inputDataset,
      otherArguments,
      cycleLength,
      blockLength,
      f,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        metadata?.let { org.tensorflow.op.data.InterleaveDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * The IteratorV2 operation
   *
   * @param sharedName The value of the sharedName attribute
   * @param container The value of the container attribute
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of Iterator
   * @see org.tensorflow.op.DataOps.iterator
   */
  public fun iterator(
    sharedName: String,
    container: String,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): Iterator = java.iterator(sharedName, container, outputTypes, outputShapes)

  /**
   * Gets the next output from the given iterator.
   *
   * @param iterator The iterator value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of IteratorGetNext
   * @see org.tensorflow.op.DataOps.iteratorGetNext
   */
  public fun iteratorGetNext(
    iterator: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): IteratorGetNext = java.iteratorGetNext(iterator, outputTypes, outputShapes)

  /**
   * Gets the next output from the given iterator as an Optional variant.
   *
   * @param iterator The iterator value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of IteratorGetNextAsOptional
   * @see org.tensorflow.op.DataOps.iteratorGetNextAsOptional
   */
  public fun iteratorGetNextAsOptional(
    iterator: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): IteratorGetNextAsOptional =
    java.iteratorGetNextAsOptional(iterator, outputTypes, outputShapes)

  /**
   * Gets the next output from the given iterator.
   *
   * This operation is a synchronous version of IteratorGetNext. It should only be used in
   * situations where the iterator does not block the calling thread, or where the calling thread
   * is not a member of the thread pool used to execute parallel operations (e.g. in eager mode).
   *
   * @param iterator The iterator value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of IteratorGetNextSync
   * @see org.tensorflow.op.DataOps.iteratorGetNextSync
   */
  public fun iteratorGetNextSync(
    iterator: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): IteratorGetNextSync = java.iteratorGetNextSync(iterator, outputTypes, outputShapes)

  /**
   * Converts the given `resource_handle` representing an iterator to a string.
   *
   * @param resourceHandle A handle to an iterator resource.
   * @return a new instance of IteratorToStringHandle
   * @see org.tensorflow.op.DataOps.iteratorToStringHandle
   */
  public fun iteratorToStringHandle(resourceHandle: Operand<out TType>): IteratorToStringHandle =
    java.iteratorToStringHandle(resourceHandle)

  /**
   * Creates a dataset that emits the key-value pairs in one or more LMDB files.
   *
   * The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary key-value
   * database. This dataset can read the contents of LMDB database files, the names of which
   * generally have the `.mdb` suffix.
   *
   * Each output element consists of a key-value pair represented as a pair of scalar string
   * `Tensor`s, where the first `Tensor` contains the key and the second `Tensor` contains the
   * value.
   *
   * LMDB uses different file formats on big- and little-endian machines. `data.LMDBDataset` can
   * only read files in the format of the host machine.
   *
   * @param filenames A scalar or a vector containing the name(s) of the binary file(s) to be
   *     read.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of LMDBDataset
   * @see org.tensorflow.op.DataOps.lMDBDataset
   */
  public fun lMDBDataset(
    filenames: Operand<TString>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): LMDBDataset = java.lMDBDataset(filenames, outputTypes, outputShapes)

  /**
   * Records the latency of producing `input_dataset` elements in a StatsAggregator.
   *
   * @param inputDataset The inputDataset value
   * @param tag The tag value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of LatencyStatsDataset
   * @see org.tensorflow.op.DataOps.latencyStatsDataset
   */
  public fun latencyStatsDataset(
    inputDataset: Operand<out TType>,
    tag: Operand<TString>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): LatencyStatsDataset = java.latencyStatsDataset(inputDataset, tag, outputTypes, outputShapes)

  /**
   * Creates a dataset that applies `f` to the outputs of `input_dataset`.
   *
   * The resulting dataset is similar to the `InterleaveDataset`, with the exception that if
   * retrieving the next value from a dataset would cause the requester to block, it will skip
   * that input dataset. This dataset is especially useful when loading data from a
   * variable-latency datastores (e.g. HDFS, GCS), as it allows the training step to proceed so
   * long as some data is available.
   *
   * !! WARNING !! This dataset is not deterministic!
   *
   * @param inputDataset The inputDataset value
   * @param otherArguments The otherArguments value
   * @param cycleLength The cycleLength value
   * @param blockLength The blockLength value
   * @param bufferOutputElements The bufferOutputElements value
   * @param prefetchInputElements The prefetchInputElements value
   * @param f A function mapping elements of `input_dataset`, concatenated with
   *     `other_arguments`, to a Dataset variant that contains elements matching `output_types`
   *     and `output_shapes`.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param deterministic the deterministic option
   * @param metadata the metadata option
   * @return a new instance of LegacyParallelInterleaveDataset
   * @see org.tensorflow.op.DataOps.legacyParallelInterleaveDataset
   */
  public fun legacyParallelInterleaveDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    cycleLength: Operand<TInt64>,
    blockLength: Operand<TInt64>,
    bufferOutputElements: Operand<TInt64>,
    prefetchInputElements: Operand<TInt64>,
    f: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    deterministic: String? = null,
    metadata: String?
= null
  ): LegacyParallelInterleaveDataset =
    java.legacyParallelInterleaveDataset(
      inputDataset,
      otherArguments,
      cycleLength,
      blockLength,
      bufferOutputElements,
      prefetchInputElements,
      f,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        deterministic?.let {
          org.tensorflow.op.data.LegacyParallelInterleaveDataset.deterministic(it)
        },
        metadata?.let { org.tensorflow.op.data.LegacyParallelInterleaveDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * The LoadDataset operation
   *
   * @param path The path value
   * @param readerFuncOtherArgs The readerFuncOtherArgs value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param readerFunc The value of the readerFunc attribute
   * @param compression the compression option
   * @return a new instance of LoadDataset
   * @see org.tensorflow.op.DataOps.loadDataset
   */
  public fun loadDataset(
    path: Operand<TString>,
    readerFuncOtherArgs: Iterable<Operand<*>>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    readerFunc: ConcreteFunction,
    compression: String? = null
  ): LoadDataset =
    java.loadDataset(
      path,
      readerFuncOtherArgs,
      outputTypes,
      outputShapes,
      readerFunc,
      *listOfNotNull(
        compression?.let { org.tensorflow.op.data.LoadDataset.compression(it) }
      ).toTypedArray()
    )

  /**
   * Makes a new iterator from the given `dataset` and stores it in `iterator`.
   *
   * This operation may be executed multiple times. Each execution will reset the iterator in
   * `iterator` to the first element of `dataset`.
   *
   * @param dataset The dataset value
   * @param iterator The iterator value
   * @return a new instance of MakeIterator
   * @see org.tensorflow.op.DataOps.makeIterator
   */
  public fun makeIterator(
    dataset: Operand<out TType>,
    iterator: Operand<out TType>
  ): MakeIterator = java.makeIterator(dataset, iterator)

  /**
   * Creates a dataset that fuses mapping with batching.
   *
   * Creates a dataset that applies `f` to the outputs of `input_dataset` and then batches
   * `batch_size` of them.
   *
   * Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up to
   * `batch_size * num_parallel_batches` copies of `f` in parallel.
   *
   * @param inputDataset A variant tensor representing the input dataset.
   * @param otherArguments A list of tensors, typically values that were captured when building a
   *     closure for `f`.
   * @param batchSize A scalar representing the number of elements to accumulate in a batch. It
   *     determines the number of concurrent invocations of `f` that process elements from
   *     `input_dataset` in parallel.
   * @param numParallelCalls A scalar representing the maximum number of parallel invocations of
   *     the `map_fn` function. Applying the `map_fn` on consecutive input elements in parallel
   *     has the potential to improve input pipeline throughput.
   * @param dropRemainder A scalar representing whether the last batch should be dropped in case
   *     its size is smaller than desired.
   * @param f A function to apply to the outputs of `input_dataset`.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param preserveCardinality the preserveCardinality option
   * @param metadata the metadata option
   * @return a new instance of MapAndBatchDataset
   * @see org.tensorflow.op.DataOps.mapAndBatchDataset
   */
  public fun mapAndBatchDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    batchSize: Operand<TInt64>,
    numParallelCalls: Operand<TInt64>,
    dropRemainder: Operand<TBool>,
    f: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    preserveCardinality: Boolean? = null,
    metadata: String? = null
  ): MapAndBatchDataset =
    java.mapAndBatchDataset(
      inputDataset,
      otherArguments,
      batchSize,
      numParallelCalls,
      dropRemainder,
      f,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        preserveCardinality?.let {
          org.tensorflow.op.data.MapAndBatchDataset.preserveCardinality(it)
        },
        metadata?.let { org.tensorflow.op.data.MapAndBatchDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * Creates a dataset that applies `f` to the outputs of `input_dataset`.
   *
   * @param inputDataset The inputDataset value
   * @param otherArguments The otherArguments value
   * @param f The value of the f attribute
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param useInterOpParallelism the useInterOpParallelism option
   * @param preserveCardinality the preserveCardinality option
   * @param metadata the metadata option
   * @return a new instance of MapDataset
   * @see org.tensorflow.op.DataOps.mapDataset
   */
  public fun mapDataset(
    inputDataset: Operand<out TType>,
    otherArguments: Iterable<Operand<*>>,
    f: ConcreteFunction,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    useInterOpParallelism: Boolean? = null,
    preserveCardinality: Boolean?
= null,
    metadata: String? = null
  ): MapDataset =
    java.mapDataset(
      inputDataset,
      otherArguments,
      f,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        useInterOpParallelism?.let { org.tensorflow.op.data.MapDataset.useInterOpParallelism(it) },
        preserveCardinality?.let { org.tensorflow.op.data.MapDataset.preserveCardinality(it) },
        metadata?.let { org.tensorflow.op.data.MapDataset.metadata(it) }
      ).toTypedArray()
    )

  /**
   * The MatchingFilesDataset operation
   *
   * @param patterns The patterns value
   * @return a new instance of MatchingFilesDataset
   * @see org.tensorflow.op.DataOps.matchingFilesDataset
   */
  public fun matchingFilesDataset(patterns: Operand<TString>): MatchingFilesDataset =
    java.matchingFilesDataset(patterns)

  /**
   * Creates a dataset that overrides the maximum intra-op parallelism.
   *
   * @param inputDataset The inputDataset value
   * @param maxIntraOpParallelism Identifies the maximum intra-op parallelism to use.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of MaxIntraOpParallelismDataset
   * @see org.tensorflow.op.DataOps.maxIntraOpParallelismDataset
   */
  public fun maxIntraOpParallelismDataset(
    inputDataset: Operand<out TType>,
    maxIntraOpParallelism: Operand<TInt64>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): MaxIntraOpParallelismDataset =
    java.maxIntraOpParallelismDataset(
      inputDataset,
      maxIntraOpParallelism,
      outputTypes,
      outputShapes
    )

  /**
   * Identity transformation that models performance.
   *
   * @param inputDataset A variant tensor representing the input dataset.
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @param algorithm the algorithm option
   * @param cpuBudget the cpuBudget option
   * @param ramBudget the ramBudget option
   * @return a new instance of ModelDataset
   * @see org.tensorflow.op.DataOps.modelDataset
   */
  public fun modelDataset(
    inputDataset: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>,
    algorithm: Long? = null,
    cpuBudget: Long? = null,
    ramBudget: Long? = null
  ): ModelDataset =
    java.modelDataset(
      inputDataset,
      outputTypes,
      outputShapes,
      *listOfNotNull(
        algorithm?.let { org.tensorflow.op.data.ModelDataset.algorithm(it) },
        cpuBudget?.let { org.tensorflow.op.data.ModelDataset.cpuBudget(it) },
        ramBudget?.let { org.tensorflow.op.data.ModelDataset.ramBudget(it) }
      ).toTypedArray()
    )

  /**
   * The NonSerializableDataset operation
   *
   * @param inputDataset The inputDataset value
   * @param outputTypes The value of the outputTypes attribute
   * @param outputShapes The value of the outputShapes attribute
   * @return a new instance of NonSerializableDataset
   * @see org.tensorflow.op.DataOps.nonSerializableDataset
   */
  public fun nonSerializableDataset(
    inputDataset: Operand<out TType>,
    outputTypes: List<Class<out TType>>,
    outputShapes: List<Shape>
  ): NonSerializableDataset =
    java.nonSerializableDataset(inputDataset, outputTypes, outputShapes)

  /**
   * Makes a "one-shot" iterator that can be iterated only once.
+ * A one-shot iterator bundles the logic for defining the dataset and + * the state of the iterator in a single op, which allows simple input + * pipelines to be defined without an additional initialization + * ("MakeIterator") step. + * + * One-shot iterators have the following limitations: + *
    + *
  • They do not support parameterization: all logic for creating the underlying + * dataset must be bundled in the `dataset_factory` function.
  • + *
  • They are not resettable. Once a one-shot iterator reaches the end of its + * underlying dataset, subsequent "IteratorGetNext" operations on that + * iterator will always produce an `OutOfRange` error.
  • + *
+ * + * For greater flexibility, use "Iterator" and "MakeIterator" to define + * an iterator using an arbitrary subgraph, which may capture tensors + * (including fed values) as parameters, and which may be reset multiple + * times by rerunning "MakeIterator". + * + * @param datasetFactory A function of type `() -> DT_VARIANT`, where the returned + * DT_VARIANT is a dataset. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OneShotIterator + * @see org.tensorflow.op.DataOps.oneShotIterator + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun oneShotIterator( + datasetFactory: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + container: String? = null, + sharedName: String? = null + ): OneShotIterator = java.oneShotIterator( + datasetFactory, + outputTypes, + outputShapes, + *listOfNotNull( + container?.let{ org.tensorflow.op.data.OneShotIterator.container(it) }, + sharedName?.let{ org.tensorflow.op.data.OneShotIterator.sharedName(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset by applying related optimizations to `input_dataset`. + * Creates a dataset by applying related optimizations to `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param optimizationsEnabled A `tf.string` vector `tf.Tensor` identifying user enabled + * optimizations. + * @param optimizationsDisabled A `tf.string` vector `tf.Tensor` identifying user disabled + * optimizations. + * @param optimizationsDefault A `tf.string` vector `tf.Tensor` identifying optimizations by + * default. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OptimizeDataset + * @see org.tensorflow.op.DataOps.optimizeDataset + * @param optimizationConfigs Sets the optimizationConfigs option. + * + * @param optimizationConfigs the optimizationConfigs option + * @return this Options instance. + */ + public fun optimizeDataset( + inputDataset: Operand, + optimizationsEnabled: Operand, + optimizationsDisabled: Operand, + optimizationsDefault: Operand, + outputTypes: List>, + outputShapes: List, + optimizationConfigs: List? = null + ): OptimizeDataset = java.optimizeDataset( + inputDataset, + optimizationsEnabled, + optimizationsDisabled, + optimizationsDefault, + outputTypes, + outputShapes, + *listOfNotNull( + optimizationConfigs?.let{ org.tensorflow.op.data.OptimizeDataset.optimizationConfigs(it) } + ).toTypedArray() + ) + + /** + * Constructs an Optional variant from a tuple of tensors. + * + * @param components The components value + * @return a new instance of OptionalFromValue + * @see org.tensorflow.op.DataOps.optionalFromValue + */ + public fun optionalFromValue(components: Iterable>): OptionalFromValue = + java.optionalFromValue( + components + ) + + /** + * Returns the value stored in an Optional variant or raises an error if none exists. + * + * @param optional The optional value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of OptionalGetValue + * @see org.tensorflow.op.DataOps.optionalGetValue + */ + public fun optionalGetValue( + optional: Operand, + outputTypes: List>, + outputShapes: List + ): OptionalGetValue = java.optionalGetValue( + optional, + outputTypes, + outputShapes + ) + + /** + * Returns true if and only if the given Optional variant has a value. 
+ * + * @param optional The optional value + * @return a new instance of OptionalHasValue + * @see org.tensorflow.op.DataOps.optionalHasValue + */ + public fun optionalHasValue(optional: Operand): OptionalHasValue = + java.optionalHasValue( + optional + ) + + /** + * Creates an Optional variant with no value. + * + * @return a new instance of OptionalNone + * @see org.tensorflow.op.DataOps.optionalNone + */ + public fun optionalNone(): OptionalNone = java.optionalNone( + + ) + + /** + * Creates a dataset by attaching tf.data.Options to `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param serializedOptions A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` + * protocol buffer. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OptionsDataset + * @see org.tensorflow.op.DataOps.optionsDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun optionsDataset( + inputDataset: Operand, + serializedOptions: String, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): OptionsDataset = java.optionsDataset( + inputDataset, + serializedOptions, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.OptionsDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that batches and pads `batch_size` elements from the input. + * + * @param inputDataset The inputDataset value + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. + * @param paddedShapes A list of int64 tensors representing the desired padded shapes + * of the corresponding output components. 
These shapes may be partially + * specified, using `-1` to indicate that a particular dimension should be + * padded to the maximum size of all batch elements. + * @param paddingValues A list of scalars containing the padding value to use for + * each of the outputs. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case + * its size + * is smaller than desired. + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of PaddedBatchDataset + * @see org.tensorflow.op.DataOps.paddedBatchDataset + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun paddedBatchDataset( + inputDataset: Operand, + batchSize: Operand, + paddedShapes: Iterable>, + paddingValues: Iterable>, + dropRemainder: Operand, + outputShapes: List, + parallelCopy: Boolean? = null, + metadata: String? 
= null + ): PaddedBatchDataset = java.paddedBatchDataset( + inputDataset, + batchSize, + paddedShapes, + paddingValues, + dropRemainder, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.PaddedBatchDataset.parallelCopy(it) }, + metadata?.let{ org.tensorflow.op.data.PaddedBatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ParallelBatchDataset operation + * + * @param inputDataset The inputDataset value + * @param batchSize The batchSize value + * @param numParallelCalls The numParallelCalls value + * @param dropRemainder The dropRemainder value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelBatchDataset + * @see org.tensorflow.op.DataOps.parallelBatchDataset + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun parallelBatchDataset( + inputDataset: Operand, + batchSize: Operand, + numParallelCalls: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + parallelCopy: Boolean? = null, + deterministic: String? = null, + metadata: String? 
= null + ): ParallelBatchDataset = java.parallelBatchDataset( + inputDataset, + batchSize, + numParallelCalls, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.ParallelBatchDataset.parallelCopy(it) }, + deterministic?.let{ org.tensorflow.op.data.ParallelBatchDataset.deterministic(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelBatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * The resulting dataset is similar to the `InterleaveDataset`, except that the + * dataset will fetch records from the interleaved datasets in parallel. + * + * The `tf.data` Python API creates instances of this op from + * `Dataset.interleave()` when the `num_parallel_calls` parameter of that method + * is set to any value other than `None`. + * + * By default, the output of this dataset will be deterministic, which may result + * in the dataset blocking if the next data item to be returned isn't available. + * In order to avoid head-of-line blocking, one can either set the `deterministic` + * attribute to "false", or leave it as "default" and set the + * `experimental_deterministic` parameter of `tf.data.Options` to `False`. + * This can improve performance at the expense of non-determinism. + * + * @param inputDataset Dataset that produces a stream of arguments for the function `f`. + * @param otherArguments Additional arguments to pass to `f` beyond those produced by + * `input_dataset`. + * Evaluated once when the dataset is instantiated. + * @param cycleLength Number of datasets (each created by applying `f` to the elements of + * `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a + * round-robin fashion. + * @param blockLength Number of elements at a time to produce from each interleaved invocation + * of a + * dataset returned by `f`. 
+ * @param bufferOutputElements The number of elements each iterator being interleaved should + * buffer (similar + * to the `.prefetch()` transformation for each interleaved iterator). + * @param prefetchInputElements Determines the number of iterators to prefetch, allowing buffers + * to warm up and + * data to be pre-fetched without blocking the main thread. + * @param numParallelCalls Determines the number of threads that should be used for fetching + * data from + * input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE` + * constant can be used to indicate that the level of parallelism should be autotuned. + * @param f A function mapping elements of `input_dataset`, concatenated with + * `other_arguments`, to a Dataset variant that contains elements matching + * `output_types` and `output_shapes`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelInterleaveDataset + * @see org.tensorflow.op.DataOps.parallelInterleaveDataset + * @param deterministic Sets the deterministic option. + * + * @param deterministic A string indicating the op-level determinism to use. Deterministic + * controls + * whether the interleave is allowed to return elements out of order if the next + * element to be returned isn't available, but a later element is. Options are + * "true", "false", and "default". "default" indicates + * that determinism should be + * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun parallelInterleaveDataset( + inputDataset: Operand, + otherArguments: Iterable>, + cycleLength: Operand, + blockLength: Operand, + bufferOutputElements: Operand, + prefetchInputElements: Operand, + numParallelCalls: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + deterministic: String? = null, + metadata: String? = null + ): ParallelInterleaveDataset = java.parallelInterleaveDataset( + inputDataset, + otherArguments, + cycleLength, + blockLength, + bufferOutputElements, + prefetchInputElements, + numParallelCalls, + f, + outputTypes, + outputShapes, + *listOfNotNull( + deterministic?.let{ org.tensorflow.op.data.ParallelInterleaveDataset.deterministic(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelInterleaveDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + * to `num_parallel_calls` copies of `f` in parallel. + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param numParallelCalls The number of concurrent invocations of `f` that process + * elements from `input_dataset` in parallel. + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelMapDataset + * @see org.tensorflow.op.DataOps.parallelMapDataset + * @param useInterOpParallelism Sets the useInterOpParallelism option. + * + * @param useInterOpParallelism the useInterOpParallelism option + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + * @param preserveCardinality Sets the preserveCardinality option. 
+ * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun parallelMapDataset( + inputDataset: Operand, + otherArguments: Iterable>, + numParallelCalls: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + useInterOpParallelism: Boolean? = null, + deterministic: String? = null, + preserveCardinality: Boolean? = null, + metadata: String? = null + ): ParallelMapDataset = java.parallelMapDataset( + inputDataset, + otherArguments, + numParallelCalls, + f, + outputTypes, + outputShapes, + *listOfNotNull( + useInterOpParallelism?.let{ + org.tensorflow.op.data.ParallelMapDataset.useInterOpParallelism(it) }, + deterministic?.let{ org.tensorflow.op.data.ParallelMapDataset.deterministic(it) }, + preserveCardinality?.let{ org.tensorflow.op.data.ParallelMapDataset.preserveCardinality(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelMapDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset + * of `Tensor` or `SparseTensor` objects representing the parsed features. + * + * @param inputDataset The inputDataset value + * @param numParallelCalls The numParallelCalls value + * @param denseDefaults A dict mapping string keys to `Tensor`s. + * The keys of the dict must match the dense_keys of the feature. + * @param sparseKeys A list of string keys in the examples features. + * The results for these keys will be returned as `SparseTensor` objects. + * @param denseKeys A list of Ndense string Tensors (scalars). + * The keys expected in the Examples features associated with dense values. + * @param sparseTypes A list of `DTypes` of the same length as `sparse_keys`. + * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + * and `tf.string` (`BytesList`) are supported. 
+ * @param denseShapes List of tuples with the same length as `dense_keys`. + * The shape of the data for each dense feature referenced by `dense_keys`. + * Required for any input tensors identified by `dense_keys`. Must be + * either fully defined, or may contain an unknown first dimension. + * An unknown first dimension means the feature is treated as having + * a variable number of blocks, and the output shape along this dimension + * is considered unknown at graph build time. Padding is applied for + * minibatch elements smaller than the maximum number of blocks for the + * given feature along this dimension. + * @param outputTypes The type list for the return values. + * @param outputShapes The list of shapes being produced. + * @param raggedValueTypes The value of the raggedValueTypes attribute + * @param raggedSplitTypes The value of the raggedSplitTypes attribute + * @param options carries optional attribute values + * @return a new instance of ParseExampleDataset + * @see org.tensorflow.op.DataOps.parseExampleDataset + * @param deterministic Sets the deterministic option. + * + * @param deterministic A string indicating the op-level determinism to use. Deterministic + * controls + * whether the dataset is allowed to return elements out of order if the next + * element to be returned isn't available, but a later element is. Options are + * "true", "false", and "default". "default" indicates + * that determinism should be + * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * @return this Options instance. + * @param raggedKeys Sets the raggedKeys option. + * + * @param raggedKeys the raggedKeys option + * @return this Options instance. 
+ */ + public fun parseExampleDataset( + inputDataset: Operand, + numParallelCalls: Operand, + denseDefaults: Iterable>, + sparseKeys: List, + denseKeys: List, + sparseTypes: List>, + denseShapes: List, + outputTypes: List>, + outputShapes: List, + raggedValueTypes: List>, + raggedSplitTypes: List>, + deterministic: String? = null, + raggedKeys: List? = null + ): ParseExampleDataset = java.parseExampleDataset( + inputDataset, + numParallelCalls, + denseDefaults, + sparseKeys, + denseKeys, + sparseTypes, + denseShapes, + outputTypes, + outputShapes, + raggedValueTypes, + raggedSplitTypes, + *listOfNotNull( + deterministic?.let{ org.tensorflow.op.data.ParseExampleDataset.deterministic(it) }, + raggedKeys?.let{ org.tensorflow.op.data.ParseExampleDataset.raggedKeys(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that asynchronously prefetches elements from `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param bufferSize The maximum number of elements to buffer in an iterator over + * this dataset. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of PrefetchDataset + * @see org.tensorflow.op.DataOps.prefetchDataset + * @param slackPeriod Sets the slackPeriod option. + * + * @param slackPeriod the slackPeriod option + * @return this Options instance. + * @param legacyAutotune Sets the legacyAutotune option. + * + * @param legacyAutotune the legacyAutotune option + * @return this Options instance. + * @param bufferSizeMin Sets the bufferSizeMin option. + * + * @param bufferSizeMin the bufferSizeMin option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun prefetchDataset( + inputDataset: Operand, + bufferSize: Operand, + outputTypes: List>, + outputShapes: List, + slackPeriod: Long? = null, + legacyAutotune: Boolean? = null, + bufferSizeMin: Long? = null, + metadata: String? = null + ): PrefetchDataset = java.prefetchDataset( + inputDataset, + bufferSize, + outputTypes, + outputShapes, + *listOfNotNull( + slackPeriod?.let{ org.tensorflow.op.data.PrefetchDataset.slackPeriod(it) }, + legacyAutotune?.let{ org.tensorflow.op.data.PrefetchDataset.legacyAutotune(it) }, + bufferSizeMin?.let{ org.tensorflow.op.data.PrefetchDataset.bufferSizeMin(it) }, + metadata?.let{ org.tensorflow.op.data.PrefetchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that uses a custom thread pool to compute `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param numThreads Identifies the number of threads to use for the private threadpool. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of PrivateThreadPoolDataset + * @see org.tensorflow.op.DataOps.privateThreadPoolDataset + */ + public fun privateThreadPoolDataset( + inputDataset: Operand, + numThreads: Operand, + outputTypes: List>, + outputShapes: List + ): PrivateThreadPoolDataset = java.privateThreadPoolDataset( + inputDataset, + numThreads, + outputTypes, + outputShapes + ) + + /** + * Creates a Dataset that returns pseudorandom numbers. + * Creates a Dataset that returns a stream of uniformly distributed + * pseudorandom 64-bit signed integers. + * + * In the TensorFlow Python API, you can instantiate this dataset via the + * class `tf.data.experimental.RandomDataset`. + * + * Instances of this dataset are also created as a result of the + * `hoist_random_uniform` static optimization. 
Whether this optimization is + * performed is determined by the `experimental_optimization.hoist_random_uniform` + * option of `tf.data.Options`. + * + * @param seed A scalar seed for the random number generator. If either seed or + * seed2 is set to be non-zero, the random number generator is seeded + * by the given seed. Otherwise, a random seed is used. + * @param seed2 A second scalar seed to avoid seed collision. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RandomDataset + * @see org.tensorflow.op.DataOps.randomDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun randomDataset( + seed: Operand, + seed2: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): RandomDataset = java.randomDataset( + seed, + seed2, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RandomDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset with a range of values. Corresponds to python's xrange. + * + * @param start corresponds to start in python's xrange(). + * @param stop corresponds to stop in python's xrange(). + * @param step corresponds to step in python's xrange(). + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RangeDataset + * @see org.tensorflow.op.DataOps.rangeDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun rangeDataset( + start: Operand, + stop: Operand, + step: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): RangeDataset = java.rangeDataset( + start, + stop, + step, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RangeDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that changes the batch size. + * Creates a dataset that rebatches elements from `input_dataset` into new batch + * sizes. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param batchSizes A vector of integers representing the size of batches to produce. These + * values + * are cycled through in order. + * @param dropRemainder The dropRemainder value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of RebatchDatasetV2 + * @see org.tensorflow.op.DataOps.rebatchDatasetV2 + */ + public fun rebatchDatasetV2( + inputDataset: Operand, + batchSizes: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List + ): RebatchDatasetV2 = java.rebatchDatasetV2( + inputDataset, + batchSizes, + dropRemainder, + outputTypes, + outputShapes + ) + + /** + * Reduces the input dataset to a singleton using a reduce function. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param initialState A nested structure of tensors, representing the initial state of the + * transformation. + * @param otherArguments The otherArguments value + * @param f A function that maps `(old_state, input_element)` to `new_state`. It must take + * two arguments and return a nested structures of tensors. The structure of + * `new_state` must match the structure of `initial_state`. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ReduceDataset + * @see org.tensorflow.op.DataOps.reduceDataset + * @param useInterOpParallelism Sets the useInterOpParallelism option. + * + * @param useInterOpParallelism the useInterOpParallelism option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun reduceDataset( + inputDataset: Operand, + initialState: Iterable>, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + useInterOpParallelism: Boolean? = null, + metadata: String? = null + ): ReduceDataset = java.reduceDataset( + inputDataset, + initialState, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + useInterOpParallelism?.let{ org.tensorflow.op.data.ReduceDataset.useInterOpParallelism(it) }, + metadata?.let{ org.tensorflow.op.data.ReduceDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Registers a dataset with the tf.data service. + * + * @param dataset The dataset value + * @param address The address value + * @param protocol The protocol value + * @param externalStatePolicy The value of the externalStatePolicy attribute + * @param options carries optional attribute values + * @return a new instance of RegisterDataset + * @see org.tensorflow.op.DataOps.registerDataset + * @param elementSpec Sets the elementSpec option. + * + * @param elementSpec the elementSpec option + * @return this Options instance. + */ + public fun registerDataset( + dataset: Operand, + address: Operand, + protocol: Operand, + externalStatePolicy: Long, + elementSpec: String? 
= null + ): RegisterDataset = java.registerDataset( + dataset, + address, + protocol, + externalStatePolicy, + *listOfNotNull( + elementSpec?.let{ org.tensorflow.op.data.RegisterDataset.elementSpec(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits the outputs of `input_dataset` `count` times. + * + * @param inputDataset The inputDataset value + * @param count A scalar representing the number of times that `input_dataset` should + * be repeated. A value of `-1` indicates that it should be repeated infinitely. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RepeatDataset + * @see org.tensorflow.op.DataOps.repeatDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun repeatDataset( + inputDataset: Operand, + count: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): RepeatDataset = java.repeatDataset( + inputDataset, + count, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RepeatDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that takes a Bernoulli sample of the contents of another dataset. + * There is no transformation in the `tf.data` Python API for creating this dataset. + * Instead, it is created as a result of the `filter_with_random_uniform_fusion` + * static optimization. Whether this optimization is performed is determined by the + * `experimental_optimization.filter_with_random_uniform_fusion` option of + * `tf.data.Options`. + * + * @param inputDataset The inputDataset value + * @param rate A scalar representing the sample rate. Each element of `input_dataset` is + * retained with this probability, independent of all other elements. 
+ * @param seed A scalar representing seed of random number generator. + * @param seed2 A scalar representing seed2 of random number generator. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SamplingDataset + * @see org.tensorflow.op.DataOps.samplingDataset + */ + public fun samplingDataset( + inputDataset: Operand, + rate: Operand, + seed: Operand, + seed2: Operand, + outputTypes: List>, + outputShapes: List + ): SamplingDataset = java.samplingDataset( + inputDataset, + rate, + seed, + seed2, + outputTypes, + outputShapes + ) + + /** + * The SaveDatasetV2 operation + * + * @param inputDataset The inputDataset value + * @param path The path value + * @param shardFuncOtherArgs The shardFuncOtherArgs value + * @param shardFunc The value of the shardFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of SaveDataset + * @see org.tensorflow.op.DataOps.saveDataset + * @param compression Sets the compression option. + * + * @param compression the compression option + * @return this Options instance. + * @param useShardFunc Sets the useShardFunc option. + * + * @param useShardFunc the useShardFunc option + * @return this Options instance. + */ + public fun saveDataset( + inputDataset: Operand, + path: Operand, + shardFuncOtherArgs: Iterable>, + shardFunc: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + compression: String? = null, + useShardFunc: Boolean? 
= null + ): SaveDataset = java.saveDataset( + inputDataset, + path, + shardFuncOtherArgs, + shardFunc, + outputTypes, + outputShapes, + *listOfNotNull( + compression?.let{ org.tensorflow.op.data.SaveDataset.compression(it) }, + useShardFunc?.let{ org.tensorflow.op.data.SaveDataset.useShardFunc(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset successively reduces `f` over the elements of `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param initialState The initialState value + * @param otherArguments The otherArguments value + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ScanDataset + * @see org.tensorflow.op.DataOps.scanDataset + * @param preserveCardinality Sets the preserveCardinality option. + * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. + * @param useDefaultDevice Sets the useDefaultDevice option. + * + * @param useDefaultDevice the useDefaultDevice option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun scanDataset( + inputDataset: Operand, + initialState: Iterable>, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + preserveCardinality: Boolean? = null, + useDefaultDevice: Boolean? = null, + metadata: String? 
= null + ): ScanDataset = java.scanDataset( + inputDataset, + initialState, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + preserveCardinality?.let{ org.tensorflow.op.data.ScanDataset.preserveCardinality(it) }, + useDefaultDevice?.let{ org.tensorflow.op.data.ScanDataset.useDefaultDevice(it) }, + metadata?.let{ org.tensorflow.op.data.ScanDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Converts the given `resource_handle` representing an iterator to a variant tensor. + * + * @param resourceHandle A handle to an iterator resource. + * @param options carries optional attribute values + * @return a new instance of SerializeIterator + * @see org.tensorflow.op.DataOps.serializeIterator + * @param externalStatePolicy Sets the externalStatePolicy option. + * + * @param externalStatePolicy the externalStatePolicy option + * @return this Options instance. + */ + public fun serializeIterator(resourceHandle: Operand, externalStatePolicy: Long? = + null): SerializeIterator = java.serializeIterator( + resourceHandle, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + ).toTypedArray() + ) + + /** + * The SetStatsAggregatorDataset operation + * + * @param inputDataset The inputDataset value + * @param statsAggregator The statsAggregator value + * @param tag The tag value + * @param counterPrefix The counterPrefix value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SetStatsAggregatorDataset + * @see org.tensorflow.op.DataOps.setStatsAggregatorDataset + */ + public fun setStatsAggregatorDataset( + inputDataset: Operand, + statsAggregator: Operand, + tag: Operand, + counterPrefix: Operand, + outputTypes: List>, + outputShapes: List + ): SetStatsAggregatorDataset = java.setStatsAggregatorDataset( + inputDataset, + statsAggregator, + tag, + counterPrefix, + outputTypes, + 
outputShapes + ) + + /** + * Creates a `Dataset` that includes only 1/`num_shards` of this dataset. + * + * @param inputDataset The inputDataset value + * @param numShards An integer representing the number of shards operating in parallel. + * @param index An integer representing the current worker index. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShardDataset + * @see org.tensorflow.op.DataOps.shardDataset + * @param requireNonEmpty Sets the requireNonEmpty option. + * + * @param requireNonEmpty the requireNonEmpty option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun shardDataset( + inputDataset: Operand, + numShards: Operand, + index: Operand, + outputTypes: List>, + outputShapes: List, + requireNonEmpty: Boolean? = null, + metadata: String? 
= null + ): ShardDataset = java.shardDataset( + inputDataset, + numShards, + index, + outputTypes, + outputShapes, + *listOfNotNull( + requireNonEmpty?.let{ org.tensorflow.op.data.ShardDataset.requireNonEmpty(it) }, + metadata?.let{ org.tensorflow.op.data.ShardDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ShuffleAndRepeatDatasetV2 operation + * + * @param inputDataset The inputDataset value + * @param bufferSize The bufferSize value + * @param seed The seed value + * @param seed2 The seed2 value + * @param count The count value + * @param seedGenerator The seedGenerator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShuffleAndRepeatDataset + * @see org.tensorflow.op.DataOps.shuffleAndRepeatDataset + * @param reshuffleEachIteration Sets the reshuffleEachIteration option. + * + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun shuffleAndRepeatDataset( + inputDataset: Operand, + bufferSize: Operand, + seed: Operand, + seed2: Operand, + count: Operand, + seedGenerator: Operand, + outputTypes: List>, + outputShapes: List, + reshuffleEachIteration: Boolean? = null, + metadata: String? 
= null + ): ShuffleAndRepeatDataset = java.shuffleAndRepeatDataset( + inputDataset, + bufferSize, + seed, + seed2, + count, + seedGenerator, + outputTypes, + outputShapes, + *listOfNotNull( + reshuffleEachIteration?.let{ + org.tensorflow.op.data.ShuffleAndRepeatDataset.reshuffleEachIteration(it) }, + metadata?.let{ org.tensorflow.op.data.ShuffleAndRepeatDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ShuffleDatasetV3 operation + * + * @param inputDataset The inputDataset value + * @param bufferSize The bufferSize value + * @param seed The seed value + * @param seed2 The seed2 value + * @param seedGenerator The seedGenerator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShuffleDataset + * @see org.tensorflow.op.DataOps.shuffleDataset + * @param reshuffleEachIteration Sets the reshuffleEachIteration option. + * + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun shuffleDataset( + inputDataset: Operand, + bufferSize: Operand, + seed: Operand, + seed2: Operand, + seedGenerator: Operand, + outputTypes: List>, + outputShapes: List, + reshuffleEachIteration: Boolean? = null, + metadata: String? = null + ): ShuffleDataset = java.shuffleDataset( + inputDataset, + bufferSize, + seed, + seed2, + seedGenerator, + outputTypes, + outputShapes, + *listOfNotNull( + reshuffleEachIteration?.let{ org.tensorflow.op.data.ShuffleDataset.reshuffleEachIteration(it) + }, + metadata?.let{ org.tensorflow.op.data.ShuffleDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that skips `count` elements from the `input_dataset`. 
+ * + * @param inputDataset The inputDataset value + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be skipped. If count is -1, skips everything. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of SkipDataset + * @see org.tensorflow.op.DataOps.skipDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun skipDataset( + inputDataset: Operand, + count: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): SkipDataset = java.skipDataset( + inputDataset, + count, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.SkipDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The SleepDataset operation + * + * @param inputDataset The inputDataset value + * @param sleepMicroseconds The sleepMicroseconds value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SleepDataset + * @see org.tensorflow.op.DataOps.sleepDataset + */ + public fun sleepDataset( + inputDataset: Operand, + sleepMicroseconds: Operand, + outputTypes: List>, + outputShapes: List + ): SleepDataset = java.sleepDataset( + inputDataset, + sleepMicroseconds, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that passes a sliding window over `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param windowSize A scalar representing the number of elements in the + * sliding window. + * @param windowShift A scalar representing the steps moving the sliding window + * forward in one iteration. It must be positive. 
+ * @param windowStride A scalar representing the stride of the input elements of the sliding + * window. + * It must be positive. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SlidingWindowDataset + * @see org.tensorflow.op.DataOps.slidingWindowDataset + */ + public fun slidingWindowDataset( + inputDataset: Operand, + windowSize: Operand, + windowShift: Operand, + windowStride: Operand, + outputTypes: List>, + outputShapes: List + ): SlidingWindowDataset = java.slidingWindowDataset( + inputDataset, + windowSize, + windowShift, + windowStride, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that will write to / read from a snapshot. + * This dataset attempts to determine whether a valid snapshot exists at the + * `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. + * If not, it will run the preprocessing pipeline as usual, and write out a + * snapshot of the data processed for future use. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param path The path we should write snapshots to / read snapshots from. + * @param readerFuncOtherArgs The readerFuncOtherArgs value + * @param shardFuncOtherArgs The shardFuncOtherArgs value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param readerFunc Optional. A function to control how to read data from snapshot shards. + * @param shardFunc Optional. A function to control how to shard data when writing a snapshot. + * @param options carries optional attribute values + * @return a new instance of SnapshotDataset + * @see org.tensorflow.op.DataOps.snapshotDataset + * @param compression Sets the compression option. + * + * @param compression The type of compression to be applied to the saved snapshot files. + * @return this Options instance. 
+ * @param readerPrefix Sets the readerPrefix option. + * + * @param readerPrefix the readerPrefix option + * @return this Options instance. + * @param writerPrefix Sets the writerPrefix option. + * + * @param writerPrefix the writerPrefix option + * @return this Options instance. + * @param hashValid Sets the hashValid option. + * + * @param hashValid the hashValid option + * @return this Options instance. + * @param hash Sets the hash option. + * + * @param hash the hash option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun snapshotDataset( + inputDataset: Operand, + path: Operand, + readerFuncOtherArgs: Iterable>, + shardFuncOtherArgs: Iterable>, + outputTypes: List>, + outputShapes: List, + readerFunc: ConcreteFunction, + shardFunc: ConcreteFunction, + compression: String? = null, + readerPrefix: String? = null, + writerPrefix: String? = null, + hashValid: Boolean? = null, + hash: Long? = null, + metadata: String? = null + ): SnapshotDataset = java.snapshotDataset( + inputDataset, + path, + readerFuncOtherArgs, + shardFuncOtherArgs, + outputTypes, + outputShapes, + readerFunc, + shardFunc, + *listOfNotNull( + compression?.let{ org.tensorflow.op.data.SnapshotDataset.compression(it) }, + readerPrefix?.let{ org.tensorflow.op.data.SnapshotDataset.readerPrefix(it) }, + writerPrefix?.let{ org.tensorflow.op.data.SnapshotDataset.writerPrefix(it) }, + hashValid?.let{ org.tensorflow.op.data.SnapshotDataset.hashValid(it) }, + hash?.let{ org.tensorflow.op.data.SnapshotDataset.hash(it) }, + metadata?.let{ org.tensorflow.op.data.SnapshotDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that splits a SparseTensor into elements row-wise. 
+ * + * @param indices The indices value + * @param values The values value + * @param denseShape The denseShape value + * @return a new instance of SparseTensorSliceDataset + * @see org.tensorflow.op.DataOps.sparseTensorSliceDataset + */ + public fun sparseTensorSliceDataset( + indices: Operand, + values: Operand, + denseShape: Operand + ): SparseTensorSliceDataset = java.sparseTensorSliceDataset( + indices, + values, + denseShape + ) + + /** + * Creates a dataset that executes a SQL query and emits rows of the result set. + * + * @param driverName The database type. Currently, the only supported type is 'sqlite'. + * @param dataSourceName A connection string to connect to the database. + * @param query A SQL query to execute. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SqlDataset + * @see org.tensorflow.op.DataOps.sqlDataset + */ + public fun sqlDataset( + driverName: Operand, + dataSourceName: Operand, + query: Operand, + outputTypes: List>, + outputShapes: List + ): SqlDataset = java.sqlDataset( + driverName, + dataSourceName, + query, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that contains `count` elements from the `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be taken. A value of `-1` indicates that all of `input_dataset` + * is taken. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TakeDataset + * @see org.tensorflow.op.DataOps.takeDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun takeDataset( + inputDataset: Operand, + count: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): TakeDataset = java.takeDataset( + inputDataset, + count, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TakeDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that stops iteration when predicate` is false. + * The `predicate` function must return a scalar boolean and accept the + * following arguments: + *
+ * <ul>
+ * <li>One tensor for each component of an element of `input_dataset`.</li>
+ * <li>One tensor for each value in `other_arguments`.</li>
+ * </ul>
+ * + * @param inputDataset The inputDataset value + * @param otherArguments A list of tensors, typically values that were captured when + * building a closure for `predicate`. + * @param predicate A function returning a scalar boolean. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TakeWhileDataset + * @see org.tensorflow.op.DataOps.takeWhileDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun takeWhileDataset( + inputDataset: Operand, + otherArguments: Iterable>, + predicate: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): TakeWhileDataset = java.takeWhileDataset( + inputDataset, + otherArguments, + predicate, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TakeWhileDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits `components` as a tuple of tensors once. + * + * @param components The components value + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TensorDataset + * @see org.tensorflow.op.DataOps.tensorDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun tensorDataset( + components: Iterable>, + outputShapes: List, + metadata: String? = null + ): TensorDataset = java.tensorDataset( + components, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TensorDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits each dim-0 slice of `components` once. 
+ * + * @param components The components value + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TensorSliceDataset + * @see org.tensorflow.op.DataOps.tensorSliceDataset + * @param isFiles Sets the isFiles option. + * + * @param isFiles the isFiles option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun tensorSliceDataset( + components: Iterable>, + outputShapes: List, + isFiles: Boolean? = null, + metadata: String? = null + ): TensorSliceDataset = java.tensorSliceDataset( + components, + outputShapes, + *listOfNotNull( + isFiles?.let{ org.tensorflow.op.data.TensorSliceDataset.isFiles(it) }, + metadata?.let{ org.tensorflow.op.data.TensorSliceDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits the lines of one or more text files. + * + * @param filenames A scalar or a vector containing the name(s) of the file(s) to be + * read. + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @param bufferSize A scalar containing the number of bytes to buffer. + * @param options carries optional attribute values + * @return a new instance of TextLineDataset + * @see org.tensorflow.op.DataOps.textLineDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun textLineDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + metadata: String? = null + ): TextLineDataset = java.textLineDataset( + filenames, + compressionType, + bufferSize, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TextLineDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits the records from one or more TFRecord files. 
+ * + * @param filenames A scalar or vector containing the name(s) of the file(s) to be + * read. + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @param bufferSize A scalar representing the number of bytes to buffer. A value of + * 0 means no buffering will be performed. + * @param options carries optional attribute values + * @return a new instance of TfRecordDataset + * @see org.tensorflow.op.DataOps.tfRecordDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun tfRecordDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + metadata: String? = null + ): TfRecordDataset = java.tfRecordDataset( + filenames, + compressionType, + bufferSize, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TfRecordDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that uses a custom thread pool to compute `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param threadPool A resource produced by the ThreadPoolHandle op. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ThreadPoolDataset + * @see org.tensorflow.op.DataOps.threadPoolDataset + */ + public fun threadPoolDataset( + inputDataset: Operand, + threadPool: Operand, + outputTypes: List>, + outputShapes: List + ): ThreadPoolDataset = java.threadPoolDataset( + inputDataset, + threadPool, + outputTypes, + outputShapes + ) + + /** + * A dataset that splits the elements of its input into multiple elements. 
+ * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of UnbatchDataset + * @see org.tensorflow.op.DataOps.unbatchDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun unbatchDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): UnbatchDataset = java.unbatchDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.UnbatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that contains the unique elements of `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of UniqueDataset + * @see org.tensorflow.op.DataOps.uniqueDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun uniqueDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): UniqueDataset = java.uniqueDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.UniqueDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The UnwrapDatasetVariant operation + * + * @param inputHandle The inputHandle value + * @return a new instance of UnwrapDatasetVariant + * @see org.tensorflow.op.DataOps.unwrapDatasetVariant + */ + public fun unwrapDatasetVariant(inputHandle: Operand): UnwrapDatasetVariant = + java.unwrapDatasetVariant( + inputHandle + ) + + /** + * Combines (nests of) input elements into a dataset of (nests of) windows. + * + * A "window" is a finite dataset of flat elements of size `size` (or possibly + * fewer if there are not enough input elements to fill the window and + * `drop_remainder` evaluates to false). + * + * The `shift` argument determines the number of input elements by which + * the window moves on each iteration. The first element in the `k`th window + * will be element + * ``` + * 1 + (k-1) * shift + * + * ``` + * + * of the input dataset. In particular, the first element of the first window + * will always be the first element of the input dataset. + * + * If the `stride` parameter is greater than 1, then each window will skip + * `(stride - 1)` input elements between each element that appears in the + * window. Output windows will still contain `size` elements regardless of + * the value of `stride`. + * + * The `stride` argument determines the stride of the input elements, and the + * `shift` argument determines the shift of the window. + * + * For example, letting `{...`} to represent a Dataset: + *
+ * <ul>
+ * <li>`tf.data.Dataset.range(7).window(2)` produces
+ * `{{0, 1}, {2, 3}, {4, 5}, {6}}`</li>
+ * <li>`tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
+ * `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`</li>
+ * <li>`tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
+ * `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`</li>
+ * </ul>
+ * + * Note that when the `window` transformation is applied to a dataset of + * nested elements, it produces a dataset of nested windows. + * + * For example: + *
+ * <ul>
+ * <li>`tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
+ * produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`</li>
+ * <li>`tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
+ * produces `{{"a": {0, 1}}, {"a": {2, 3}}}`</li>
+ * </ul>
+ * + * @param inputDataset The inputDataset value + * @param sizeOutput An integer scalar, representing the number of elements + * of the input dataset to combine into a window. Must be positive. + * @param shift An integer scalar, representing the number of input elements + * by which the window moves in each iteration. Defaults to `size`. + * Must be positive. + * @param stride An integer scalar, representing the stride of the input elements + * in the sliding window. Must be positive. The default value of 1 means + * "retain every input element". + * @param dropRemainder A Boolean scalar, representing whether the last window should be + * dropped if its size is smaller than `window_size`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of WindowDataset + * @see org.tensorflow.op.DataOps.windowDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun windowDataset( + inputDataset: Operand, + sizeOutput: Operand, + shift: Operand, + stride: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): WindowDataset = java.windowDataset( + inputDataset, + sizeOutput, + shift, + stride, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.WindowDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The WrapDatasetVariant operation + * + * @param inputHandle The inputHandle value + * @return a new instance of WrapDatasetVariant + * @see org.tensorflow.op.DataOps.wrapDatasetVariant + */ + public fun wrapDatasetVariant(inputHandle: Operand): WrapDatasetVariant = + java.wrapDatasetVariant( + inputHandle + ) + + /** + * Creates a dataset that zips together `input_datasets`. 
+ * The elements of the resulting dataset are created by zipping corresponding + * elements from each of the input datasets. + * + * The size of the resulting dataset will match the size of the smallest input + * dataset, and no error will be raised if input datasets have different sizes. + * + * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped together. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ZipDataset + * @see org.tensorflow.op.DataOps.zipDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun zipDataset( + inputDatasets: Iterable>, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): ZipDataset = java.zipDataset( + inputDatasets, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.ZipDataset.metadata(it) } + ).toTypedArray() + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt new file mode 100644 index 00000000000..6e6b567de7a --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -0,0 +1,227 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.dtypes.AsString +import org.tensorflow.op.dtypes.Cast +import org.tensorflow.op.dtypes.Complex +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `dtypes` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class DtypesOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Converts each entry in the given tensor to strings. + * Supports many numeric types and boolean. + * + * For Unicode, see the + * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode + * text) + * tutorial. 
+ * + * Examples: + * ``` + * + * tf.strings.as_string([3, 2]) + * + * tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() + * array([b'3.14', b'2.72'], dtype=object) + * ``` + * + * @param input The input value + * @param options carries optional attribute values + * @return a new instance of AsString + * @see org.tensorflow.op.DtypesOps.asString + * @param precision Sets the precision option. + * + * @param precision The post-decimal precision to use for floating point numbers. + * Only used if precision > -1. + * @return this Options instance. + * @param scientific Sets the scientific option. + * + * @param scientific Use scientific notation for floating point numbers. + * @return this Options instance. + * @param shortest Sets the shortest option. + * + * @param shortest Use shortest representation (either scientific or standard) for + * floating point numbers. + * @return this Options instance. + * @param width Sets the width option. + * + * @param width Pad pre-decimal numbers to this width. + * Applies to both floating point and integer numbers. + * Only used if width > -1. + * @return this Options instance. + * @param fill Sets the fill option. + * + * @param fill The value to pad if width > -1. If empty, pads with spaces. + * Another typical value is '0'. String cannot be longer than 1 character. + * @return this Options instance. + */ + public fun asString( + input: Operand, + precision: Long? = null, + scientific: Boolean? = null, + shortest: Boolean? = null, + width: Long? = null, + fill: String? 
= null + ): AsString = java.asString( + input, + *listOfNotNull( + precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } + ).toTypedArray() + ) + + /** + * Cast x of type SrcT to y of DstT. + * + * @param data type for `y` output + * @param x The x value + * @param DstT The value of the DstT attribute + * @param options carries optional attribute values + * @param data type for `Cast` output and operands + * @return a new instance of Cast + * @see org.tensorflow.op.DtypesOps.cast + * @param Truncate Sets the Truncate option. + * + * @param Truncate the Truncate option + * @return this Options instance. + */ + public fun cast( + x: Operand, + DstT: Class, + Truncate: Boolean? = null + ): Cast = java.cast( + x, + DstT, + *listOfNotNull( + Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } + ).toTypedArray() + ) + + /** + * Converts two real numbers to a complex number. + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form `\(a + bj\)`, where + * _a_ represents the `real` part and _b_ represents the `imag` part. + * + * The input tensors `real` and `imag` must have the same shape. 
+ * + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * + * ``` + * + * @param data type for `out` output + * @param real The real value + * @param imag The imag value + * @param Tout The value of the Tout attribute + * @param data type for `Complex` output and operands + * @param data type for `Complex` output and operands + * @return a new instance of Complex + * @see org.tensorflow.op.DtypesOps.complex + */ + public fun complex( + real: Operand, + imag: Operand, + Tout: Class + ): Complex = java.complex( + real, + imag, + Tout + ) + + /** + * Cast x of type SrcT to y of DstT. + * + * @param data type for `y` output + * @param x The x value + * @param DstT The value of the DstT attribute + * @param options carries optional attribute values + * @param data type for `Cast` output and operands + * @return a new instance of Cast + * @see org.tensorflow.op.DtypesOps.cast + * @param Truncate Sets the Truncate option. + * + * @param Truncate the Truncate option + * @return this Options instance. + */ + @JvmName("castReified") + public inline fun cast(x: Operand, Truncate: Boolean? = null): + Cast = cast(x, U::class.java, Truncate) + + /** + * Converts two real numbers to a complex number. + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form `\(a + bj\)`, where + * _a_ represents the `real` part and _b_ represents the `imag` part. + * + * The input tensors `real` and `imag` must have the same shape. 
+ * + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * + * ``` + * + * @param data type for `out` output + * @param real The real value + * @param imag The imag value + * @param Tout The value of the Tout attribute + * @param data type for `Complex` output and operands + * @param data type for `Complex` output and operands + * @return a new instance of Complex + * @see org.tensorflow.op.DtypesOps.complex + */ + @JvmName("complexReified") + public inline fun complex(real: Operand, imag: Operand): + Complex = complex(real, imag, U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt new file mode 100644 index 00000000000..e522627f8dd --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -0,0 +1,1804 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.image.AdjustContrast +import org.tensorflow.op.image.AdjustHue +import org.tensorflow.op.image.AdjustSaturation +import org.tensorflow.op.image.CombinedNonMaxSuppression +import org.tensorflow.op.image.CropAndResize +import org.tensorflow.op.image.CropAndResizeGradBoxes +import org.tensorflow.op.image.CropAndResizeGradImage +import org.tensorflow.op.image.DecodeAndCropJpeg +import org.tensorflow.op.image.DecodeBmp +import org.tensorflow.op.image.DecodeGif +import org.tensorflow.op.image.DecodeImage +import org.tensorflow.op.image.DecodeJpeg +import org.tensorflow.op.image.DecodePng +import org.tensorflow.op.image.DrawBoundingBoxes +import org.tensorflow.op.image.EncodeJpeg +import org.tensorflow.op.image.EncodeJpegVariableQuality +import org.tensorflow.op.image.EncodePng +import org.tensorflow.op.image.ExtractImagePatches +import org.tensorflow.op.image.ExtractJpegShape +import org.tensorflow.op.image.HsvToRgb +import org.tensorflow.op.image.NonMaxSuppression +import org.tensorflow.op.image.NonMaxSuppressionWithOverlaps +import org.tensorflow.op.image.QuantizedResizeBilinear +import org.tensorflow.op.image.RandomCrop +import org.tensorflow.op.image.ResizeArea +import org.tensorflow.op.image.ResizeBicubic +import org.tensorflow.op.image.ResizeBilinear +import org.tensorflow.op.image.ResizeNearestNeighbor +import org.tensorflow.op.image.RgbToHsv +import org.tensorflow.op.image.SampleDistortedBoundingBox +import org.tensorflow.op.image.ScaleAndTranslate +import org.tensorflow.op.image.StatelessSampleDistortedBoundingBox +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.TUint8 +import 
org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `image` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class ImageOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.ImageOps = ops.java.image + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Adjust the contrast of one or more images. + * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are + * interpreted as `[height, width, channels]`. The other dimensions only + * represent a collection of images, such as `[batch, height, width, channels].` + * + * Contrast is adjusted independently for each channel of each image. + * + * For each channel, the Op first computes the mean of the image pixels in the + * channel and then adjusts each component of each pixel to + * `(x - mean) * contrast_factor + mean`. + * + * @param data type for `output` output + * @param images Images to adjust. At least 3-D. + * @param contrastFactor A float multiplier for adjusting contrast. + * @param data type for `AdjustContrastv2` output and operands + * @return a new instance of AdjustContrast + * @see org.tensorflow.op.ImageOps.adjustContrast + */ + public fun adjustContrast(images: Operand, contrastFactor: Operand): + AdjustContrast = java.adjustContrast( + images, + contrastFactor + ) + + /** + * Adjust the hue of one or more images. + * `images` is a tensor of at least 3 dimensions. The last dimension is + * interpreted as channels, and must be three. + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB + * colors are first mapped into HSV. A delta is then applied all the hue values, + * and then remapped back to RGB colorspace. + * + * @param data type for `output` output + * @param images Images to adjust. At least 3-D. 
+ * @param delta A float delta to add to the hue. + * @param data type for `AdjustHue` output and operands + * @return a new instance of AdjustHue + * @see org.tensorflow.op.ImageOps.adjustHue + */ + public fun adjustHue(images: Operand, delta: Operand): AdjustHue = + java.adjustHue( + images, + delta + ) + + /** + * Adjust the saturation of one or more images. + * `images` is a tensor of at least 3 dimensions. The last dimension is + * interpreted as channels, and must be three. + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB + * colors are first mapped into HSV. A scale is then applied all the saturation + * values, and then remapped back to RGB colorspace. + * + * @param data type for `output` output + * @param images Images to adjust. At least 3-D. + * @param scale A float scale to add to the saturation. + * @param data type for `AdjustSaturation` output and operands + * @return a new instance of AdjustSaturation + * @see org.tensorflow.op.ImageOps.adjustSaturation + */ + public fun adjustSaturation(images: Operand, scale: Operand): + AdjustSaturation = java.adjustSaturation( + images, + scale + ) + + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * This operation performs non_max_suppression on the inputs per batch, across + * all classes. + * Prunes away boxes that have high intersection-over-union (IOU) overlap + * with previously selected boxes. Bounding boxes are supplied as + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * diagonal pair of box corners and the coordinates can be provided as normalized + * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + * is agnostic to where the origin is in the coordinate system. 
Also note that + * this algorithm is invariant to orthogonal transformations and translations + * of the coordinate system; thus translating or reflections of the coordinate + * system result in the same boxes being selected by the algorithm. + * The output of this operation is the final boxes, scores and classes tensor + * returned after performing non_max_suppression. + * + * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 + * then + * same boxes are used for all classes otherwise, if `q` is equal to number of + * classes, class-specific boxes are used. + * @param scores A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]` + * representing a single score corresponding to each box (each row of boxes). + * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression per class + * @param maxTotalSize An int32 scalar representing the maximum number of boxes retained over + * all + * classes. Note that setting this value to a large number may result in OOM error + * depending on the system workload. + * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. + * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @param options carries optional attribute values + * @return a new instance of CombinedNonMaxSuppression + * @see org.tensorflow.op.ImageOps.combinedNonMaxSuppression + * @param padPerClass Sets the padPerClass option. + * + * @param padPerClass If false, the output nmsed boxes, scores and classes + * are padded/clipped to `max_total_size`. If true, the + * output nmsed boxes, scores and classes are padded to be of length + * `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in + * which case it is clipped to `max_total_size`. Defaults to false. 
+ * @return this Options instance. + * @param clipBoxes Sets the clipBoxes option. + * + * @param clipBoxes If true, assume the box coordinates are between [0, 1] and clip the + * output boxes + * if they fall beyond [0, 1]. If false, do not do clipping and output the box + * coordinates as it is. + * @return this Options instance. + */ + public fun combinedNonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSizePerClass: Operand, + maxTotalSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + padPerClass: Boolean? = null, + clipBoxes: Boolean? = null + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + boxes, + scores, + maxOutputSizePerClass, + maxTotalSize, + iouThreshold, + scoreThreshold, + *listOfNotNull( + padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + ).toTypedArray() + ) + + /** + * Extracts crops from the input image tensor and resizes them. + * Extracts crops from the input image tensor and resizes them using bilinear + * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a + * common output size specified by `crop_size`. This is more general than the + * `crop_to_bounding_box` op which extracts a fixed size slice from the input image + * and does not allow resizing or aspect ratio change. + * + * Returns a tensor with `crops` from the input `image` at positions defined at the + * bounding box locations in `boxes`. The cropped boxes are all resized (with + * bilinear or nearest neighbor interpolation) to a fixed + * `size = [crop_height, crop_width]`. The result is a 4-D tensor + * `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. 
+ * In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical + * results to using `tf.image.resize_bilinear()` or + * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with + * `align_corners=True`. + * + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in + * which case the sampled crop is an up-down flipped version of the original + * image. The width dimension is treated similarly. Normalized coordinates + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param cropSize A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All + * cropped image patches are resized to this size. The aspect ratio of the image + * content is not preserved. Both `crop_height` and `crop_width` need to be + * positive. + * @param options carries optional attribute values + * @return a new instance of CropAndResize + * @see org.tensorflow.op.ImageOps.cropAndResize + * @param method Sets the method option. + * + * @param method A string specifying the sampling method for resizing. It can be either + * `"bilinear"` or `"nearest"` and default to `"bilinear"`. 
Currently two sampling + * methods are supported: Bilinear and Nearest Neighbor. + * @return this Options instance. + * @param extrapolationValue Sets the extrapolationValue option. + * + * @param extrapolationValue Value used for extrapolation, when applicable. + * @return this Options instance. + */ + public fun cropAndResize( + image: Operand, + boxes: Operand, + boxInd: Operand, + cropSize: Operand, + method: String? = null, + extrapolationValue: Float? = null + ): CropAndResize = java.cropAndResize( + image, + boxes, + boxInd, + cropSize, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + ).toTypedArray() + ) + + /** + * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. + * + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. 
+ * @param options carries optional attribute values + * @return a new instance of CropAndResizeGradBoxes + * @see org.tensorflow.op.ImageOps.cropAndResizeGradBoxes + * @param method Sets the method option. + * + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + * @return this Options instance. + */ + public fun cropAndResizeGradBoxes( + grads: Operand, + image: Operand, + boxes: Operand, + boxInd: Operand, + method: String? = null + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + grads, + image, + boxes, + boxInd, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + ).toTypedArray() + ) + + /** + * Computes the gradient of the crop_and_resize op wrt the input image tensor. + * + * @param data type for `output` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. 
Both `image_height` and `image_width` need + * to be positive. + * @param T The value of the T attribute + * @param options carries optional attribute values + * @param data type for `CropAndResizeGradImage` output and operands + * @return a new instance of CropAndResizeGradImage + * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method Sets the method option. + * + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + * @return this Options instance. + */ + public fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + T_: Class, + method: String? = null + ): CropAndResizeGradImage = java.cropAndResizeGradImage( + grads, + boxes, + boxInd, + imageSize, + T_, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + ).toTypedArray() + ) + + /** + * Decode and Crop a JPEG-encoded image to a uint8 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
    + *
  • 0: Use the number of channels in the JPEG-encoded image.
  • + *
  • 1: output a grayscale image.
  • + *
  • 3: output an RGB image.
  • + *
+ * + * If needed, the JPEG-encoded image is transformed to match the requested number + * of color channels. + * + * The attr `ratio` allows downscaling the image by an integer factor during + * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + * downscaling the image later. + * + * It is equivalent to a combination of decode and crop, but much faster by only + * decoding partial jpeg image. + * + * @param contents 0-D. The JPEG-encoded image. + * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. + * @param options carries optional attribute values + * @return a new instance of DecodeAndCropJpeg + * @see org.tensorflow.op.ImageOps.decodeAndCropJpeg + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param ratio Sets the ratio option. + * + * @param ratio Downscaling ratio. + * @return this Options instance. + * @param fancyUpscaling Sets the fancyUpscaling option. + * + * @param fancyUpscaling If true use a slower but nicer upscaling of the + * chroma planes (yuv420/422 only). + * @return this Options instance. + * @param tryRecoverTruncated Sets the tryRecoverTruncated option. + * + * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @return this Options instance. + * @param acceptableFraction Sets the acceptableFraction option. + * + * @param acceptableFraction The minimum required fraction of lines before a truncated + * input is accepted. + * @return this Options instance. + * @param dctMethod Sets the dctMethod option. + * + * @param dctMethod string specifying a hint about the algorithm used for + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. 
The hint may be ignored (e.g., the internal + * jpeg library changes to a version that does not have that specific + * option.) + * @return this Options instance. + */ + public fun decodeAndCropJpeg( + contents: Operand, + cropWindow: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? = null, + dctMethod: String? = null + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + contents, + cropWindow, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + ).toTypedArray() + ) + + /** + * Decode the first frame of a BMP-encoded image to a uint8 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
    + *
  • 0: Use the number of channels in the BMP-encoded image.
  • + *
  • 3: output an RGB image.
  • + *
  • 4: output an RGBA image.
  • + *
+ * + * @param contents 0-D. The BMP-encoded image. + * @param options carries optional attribute values + * @return a new instance of DecodeBmp + * @see org.tensorflow.op.ImageOps.decodeBmp + * @param channels Sets the channels option. + * + * @param channels the channels option + * @return this Options instance. + */ + public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() + ) + + /** + * Decode the frame(s) of a GIF-encoded image to a uint8 tensor. + * GIF images with frame or transparency compression are not supported. + * On Linux and MacOS systems, convert animated GIFs from compressed to + * uncompressed by running: + * ``` + * convert $src.gif -coalesce $dst.gif + * + * ``` + * + * This op also supports decoding JPEGs and PNGs, though it is cleaner to use + * `tf.io.decode_image`. + * + * @param contents 0-D. The GIF-encoded image. + * @return a new instance of DecodeGif + * @see org.tensorflow.op.ImageOps.decodeGif + */ + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + contents + ) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. 
+ * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param data type for `image` output + * @param contents 0-D. The encoded image bytes. + * @param options carries optional attribute values + * @return a new instance of DecodeImage, with default output types + * @see org.tensorflow.op.ImageOps.decodeImage + */ + public fun decodeImage(contents: Operand, options: Array): + DecodeImage = java.decodeImage( + contents, + options + ) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. + * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param data type for `image` output + * @param contents 0-D. The encoded image bytes. 
+ * @param dtype The desired DType of the returned Tensor. + * @param options carries optional attribute values + * @param data type for `DecodeImage` output and operands + * @return a new instance of DecodeImage + * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param expandAnimations Sets the expandAnimations option. + * + * @param expandAnimations Controls the output shape of the returned op. If True, the returned + * op will + * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all + * GIFs, whether animated or not. If, False, the returned op will produce a 3-D + * tensor for all file types and will truncate animated GIFs to the first frame. + * @return this Options instance. + */ + public fun decodeImage( + contents: Operand, + dtype: Class, + channels: Long? = null, + expandAnimations: Boolean? = null + ): DecodeImage = java.decodeImage( + contents, + dtype, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeImage.channels(it) }, + expandAnimations?.let{ org.tensorflow.op.image.DecodeImage.expandAnimations(it) } + ).toTypedArray() + ) + + /** + * Decode a JPEG-encoded image to a uint8 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
    + *
  • 0: Use the number of channels in the JPEG-encoded image.
  • + *
  • 1: output a grayscale image.
  • + *
  • 3: output an RGB image.
  • + *
+ * + * If needed, the JPEG-encoded image is transformed to match the requested number + * of color channels. + * + * The attr `ratio` allows downscaling the image by an integer factor during + * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + * downscaling the image later. + * + * This op also supports decoding PNGs and non-animated GIFs since the interface is + * the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param contents 0-D. The JPEG-encoded image. + * @param options carries optional attribute values + * @return a new instance of DecodeJpeg + * @see org.tensorflow.op.ImageOps.decodeJpeg + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param ratio Sets the ratio option. + * + * @param ratio Downscaling ratio. + * @return this Options instance. + * @param fancyUpscaling Sets the fancyUpscaling option. + * + * @param fancyUpscaling If true use a slower but nicer upscaling of the + * chroma planes (yuv420/422 only). + * @return this Options instance. + * @param tryRecoverTruncated Sets the tryRecoverTruncated option. + * + * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @return this Options instance. + * @param acceptableFraction Sets the acceptableFraction option. + * + * @param acceptableFraction The minimum required fraction of lines before a truncated + * input is accepted. + * @return this Options instance. + * @param dctMethod Sets the dctMethod option. + * + * @param dctMethod string specifying a hint about the algorithm used for + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * jpeg library changes to a version that does not have that specific + * option.) + * @return this Options instance. 
+ */ + public fun decodeJpeg( + contents: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? = null, + dctMethod: String? = null + ): DecodeJpeg = java.decodeJpeg( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + ).toTypedArray() + ) + + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
+ * <ul>
+ * <li>0: Use the number of channels in the PNG-encoded image.</li>
+ * <li>1: output a grayscale image.</li>
+ * <li>3: output an RGB image.</li>
+ * <li>4: output an RGBA image.</li>
+ * </ul>
+ * + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output + * @param contents 0-D. The PNG-encoded image. + * @param options carries optional attribute values + * @return a new instance of DecodePng, with default output types + * @see org.tensorflow.op.ImageOps.decodePng + */ + public fun decodePng(contents: Operand, options: Array): + DecodePng = java.decodePng( + contents, + options + ) + + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
+ * <ul>
+ * <li>0: Use the number of channels in the PNG-encoded image.</li>
+ * <li>1: output a grayscale image.</li>
+ * <li>3: output an RGB image.</li>
+ * <li>4: output an RGBA image.</li>
+ * </ul>
+ * + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output + * @param contents 0-D. The PNG-encoded image. + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `DecodePng` output and operands + * @return a new instance of DecodePng + * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + */ + public fun decodePng( + contents: Operand, + dtype: Class, + channels: Long? = null + ): DecodePng = java.decodePng( + contents, + dtype, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() + ) + + /** + * Draw bounding boxes on a batch of images. + * Outputs a copy of `images` but draws on top of the pixels zero or more bounding + * boxes specified by the locations in `boxes`. The coordinates of the each + * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * height of the underlying image. + * + * For example, if an image is 100 x 200 pixels (height x width) and the bounding + * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of + * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). + * + * Parts of the bounding box may fall outside the image. + * + * @param data type for `output` output + * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. + * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + * boxes. + * @param colors 2-D. 
A list of RGBA colors to cycle through for the boxes. + * @param data type for `DrawBoundingBoxesV2` output and operands + * @return a new instance of DrawBoundingBoxes + * @see org.tensorflow.op.ImageOps.drawBoundingBoxes + */ + public fun drawBoundingBoxes( + images: Operand, + boxes: Operand, + colors: Operand + ): DrawBoundingBoxes = java.drawBoundingBoxes( + images, + boxes, + colors + ) + + /** + * JPEG-encode an image. + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * + * The attr `format` can be used to override the color format of the encoded + * output. Values can be: + *
+ * <ul>
+ * <li>`''`: Use a default format based on the number of channels in the image.</li>
+ * <li>`grayscale`: Output a grayscale JPEG image. The `channels` dimension
+ * of `image` must be 1.</li>
+ * <li>`rgb`: Output an RGB JPEG image. The `channels` dimension
+ * of `image` must be 3.</li>
+ * </ul>
+ * + * If `format` is not specified or is the empty string, a default format is picked + * in function of the number of channels in `image`: + *
+ * <ul>
+ * <li>1: Output a grayscale image.</li>
+ * <li>3: Output an RGB image.</li>
+ * </ul>
+ * + * @param image 3-D with shape `[height, width, channels]`. + * @param options carries optional attribute values + * @return a new instance of EncodeJpeg + * @see org.tensorflow.op.ImageOps.encodeJpeg + * @param format Sets the format option. + * + * @param format Per pixel image format. + * @return this Options instance. + * @param quality Sets the quality option. + * + * @param quality Quality of the compression from 0 to 100 (higher is better and slower). + * @return this Options instance. + * @param progressive Sets the progressive option. + * + * @param progressive If True, create a JPEG that loads progressively (coarse to fine). + * @return this Options instance. + * @param optimizeSize Sets the optimizeSize option. + * + * @param optimizeSize If True, spend CPU/RAM to reduce size with no quality change. + * @return this Options instance. + * @param chromaDownsampling Sets the chromaDownsampling option. + * + * @param chromaDownsampling See http://en.wikipedia.org/wiki/Chroma_subsampling. + * @return this Options instance. + * @param densityUnit Sets the densityUnit option. + * + * @param densityUnit Unit used to specify `x_density` and `y_density`: + * pixels per inch (`'in'`) or centimeter (`'cm'`). + * @return this Options instance. + * @param xDensity Sets the xDensity option. + * + * @param xDensity Horizontal pixels per density unit. + * @return this Options instance. + * @param yDensity Sets the yDensity option. + * + * @param yDensity Vertical pixels per density unit. + * @return this Options instance. + * @param xmpMetadata Sets the xmpMetadata option. + * + * @param xmpMetadata If not empty, embed this XMP metadata in the image header. + * @return this Options instance. + */ + public fun encodeJpeg( + image: Operand, + format: String? = null, + quality: Long? = null, + progressive: Boolean? = null, + optimizeSize: Boolean? = null, + chromaDownsampling: Boolean? = null, + densityUnit: String? = null, + xDensity: Long? = null, + yDensity: Long? 
= null, + xmpMetadata: String? = null + ): EncodeJpeg = java.encodeJpeg( + image, + *listOfNotNull( + format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + ).toTypedArray() + ) + + /** + * JPEG encode input image with provided compression quality. + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * `quality` is an int32 jpeg compression quality value between 0 and 100. + * + * @param images Images to adjust. At least 3-D. + * @param quality An int quality to encode to. + * @return a new instance of EncodeJpegVariableQuality + * @see org.tensorflow.op.ImageOps.encodeJpegVariableQuality + */ + public fun encodeJpegVariableQuality(images: Operand, quality: Operand): + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + images, + quality + ) + + /** + * PNG-encode an image. + * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` + * where `channels` is: + *
+ * <ul>
+ * <li>1: for grayscale.</li>
+ * <li>2: for grayscale + alpha.</li>
+ * <li>3: for RGB.</li>
+ * <li>4: for RGBA.</li>
+ * </ul>
+ * + * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder + * default or a value from 0 to 9. 9 is the highest compression level, generating + * the smallest output, but is slower. + * + * @param image 3-D with shape `[height, width, channels]`. + * @param options carries optional attribute values + * @return a new instance of EncodePng + * @see org.tensorflow.op.ImageOps.encodePng + * @param compression Sets the compression option. + * + * @param compression Compression level. + * @return this Options instance. + */ + public fun encodePng(image: Operand, compression: Long? = null): EncodePng = + java.encodePng( + image, + *listOfNotNull( + compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() + ) + + /** + * Extract `patches` from `images` and put them in the "depth" output dimension. + * + * @param data type for `patches` output + * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `images`. + * @param strides How far the centers of two consecutive patches are in + * the images. Must be: `[1, stride_rows, stride_cols, 1]`. + * @param rates Must be: `[1, rate_rows, rate_cols, 1]`. This is the + * input stride, specifying how far two consecutive patch samples are in the + * input. Equivalent to extracting patches with + * `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + * subsampling them spatially by a factor of `rates`. This is equivalent to + * `rate` in dilated (a.k.a. Atrous) convolutions. + * @param padding The type of padding algorithm to use. 
+ * @param data type for `ExtractImagePatches` output and operands + * @return a new instance of ExtractImagePatches + * @see org.tensorflow.op.ImageOps.extractImagePatches + */ + public fun extractImagePatches( + images: Operand, + ksizes: List, + strides: List, + rates: List, + padding: String + ): ExtractImagePatches = java.extractImagePatches( + images, + ksizes, + strides, + rates, + padding + ) + + /** + * Extract the shape information of a JPEG-encoded image. + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param data type for `image_shape` output + * @param contents 0-D. The JPEG-encoded image. + * @return a new instance of ExtractJpegShape, with default output types + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ + public fun extractJpegShape(contents: Operand): ExtractJpegShape = + java.extractJpegShape( + contents + ) + + /** + * Extract the shape information of a JPEG-encoded image. + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param data type for `image_shape` output + * @param contents 0-D. The JPEG-encoded image. + * @param outputType (Optional) The output type of the operation (int32 or int64). + * Defaults to int32. + * @param data type for `ExtractJpegShape` output and operands + * @return a new instance of ExtractJpegShape + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ + public fun extractJpegShape(contents: Operand, outputType: Class): + ExtractJpegShape = java.extractJpegShape( + contents, + outputType + ) + + /** + * Convert one or more images from HSV to RGB. + * Outputs a tensor of the same shape as the `images` tensor, containing the RGB + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * See `rgb_to_hsv` for a description of the HSV encoding. + * + * @param data type for `output` output + * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. 
+ * @param data type for `HSVToRGB` output and operands + * @return a new instance of HsvToRgb + * @see org.tensorflow.op.ImageOps.hsvToRgb + */ + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + images + ) + + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * pruning away boxes that have high intersection-over-union (IOU) overlap + * with previously selected boxes. Bounding boxes with score less than + * `score_threshold` are removed. Bounding boxes are supplied as + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * diagonal pair of box corners and the coordinates can be provided as normalized + * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + * is agnostic to where the origin is in the coordinate system and more + * generally is invariant to orthogonal transformations and translations + * of the coordinate system; thus translating or reflections of the coordinate + * system result in the same boxes being selected by the algorithm. + * The output of this operation is a set of integers indexing into the input + * collection of bounding boxes representing the selected boxes. The bounding + * box coordinates corresponding to the selected indices can then be obtained + * using the `tf.gather operation`. For example: + * selected_indices = tf.image.non_max_suppression_v2( + * boxes, scores, max_output_size, iou_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) + * This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. + * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score + * of other overlapping boxes instead of directly causing them to be pruned. + * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be + * larger than 0. + * + * @param data type for `selected_scores` output + * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. 
+ * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * score corresponding to each box (each row of boxes). + * @param maxOutputSize A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression. + * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. + * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @param softNmsSigma A 0-D float tensor representing the sigma parameter for Soft NMS; see + * Bodla et + * al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which + * is default), we fall back to standard (hard) NMS. + * @param options carries optional attribute values + * @param data type for `NonMaxSuppressionV5` output and operands + * @return a new instance of NonMaxSuppression + * @see org.tensorflow.op.ImageOps.nonMaxSuppression + * @param padToMaxOutputSize Sets the padToMaxOutputSize option. + * + * @param padToMaxOutputSize If true, the output `selected_indices` is padded to be of length + * `max_output_size`. Defaults to false. + * @return this Options instance. + */ + public fun nonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + softNmsSigma: Operand, + padToMaxOutputSize: Boolean? = null + ): NonMaxSuppression = java.nonMaxSuppression( + boxes, + scores, + maxOutputSize, + iouThreshold, + scoreThreshold, + softNmsSigma, + *listOfNotNull( + padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + ).toTypedArray() + ) + + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * pruning away boxes that have high overlaps + * with previously selected boxes. Bounding boxes with score less than + * `score_threshold` are removed. 
N-by-n overlap values are supplied as square matrix, + * which allows for defining a custom overlap criterium (eg. intersection over union, + * intersection over area, etc.). + * + * The output of this operation is a set of integers indexing into the input + * collection of bounding boxes representing the selected boxes. The bounding + * box coordinates corresponding to the selected indices can then be obtained + * using the `tf.gather operation`. For example: + * + * selected_indices = tf.image.non_max_suppression_with_overlaps( + * overlaps, scores, max_output_size, overlap_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) + * + * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing + * the n-by-n box overlap values. + * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * score corresponding to each box (each row of boxes). + * @param maxOutputSize A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression. + * @param overlapThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too. + * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @return a new instance of NonMaxSuppressionWithOverlaps + * @see org.tensorflow.op.ImageOps.nonMaxSuppressionWithOverlaps + */ + public fun nonMaxSuppressionWithOverlaps( + overlaps: Operand, + scores: Operand, + maxOutputSize: Operand, + overlapThreshold: Operand, + scoreThreshold: Operand + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + overlaps, + scores, + maxOutputSize, + overlapThreshold, + scoreThreshold + ) + + /** + * Resize quantized `images` to `size` using quantized bilinear interpolation. + * Input images and output images must be quantized types. 
+ * + * @param data type for `resized_images` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param min The min value + * @param max The max value + * @param options carries optional attribute values + * @param data type for `QuantizedResizeBilinear` output and operands + * @return a new instance of QuantizedResizeBilinear + * @see org.tensorflow.op.ImageOps.quantizedResizeBilinear + * @param alignCorners Sets the alignCorners option. + * + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. + */ + public fun quantizedResizeBilinear( + images: Operand, + sizeOutput: Operand, + min: Operand, + max: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + images, + sizeOutput, + min, + max, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + /** + * Randomly crop `image`. + * `size` is a 1-D int64 tensor with 2 elements representing the crop height and + * width. The values must be non negative. + * + * This Op picks a random location in `image` and crops a `height` by `width` + * rectangle from that location. The random location is picked so the cropped + * area will fit inside the original image. + * + * @param data type for `output` output + * @param image 3-D of shape `[height, width, channels]`. 
+ * @param sizeOutput 1-D of length 2 containing: `crop_height`, `crop_width`.. + * @param options carries optional attribute values + * @param data type for `RandomCrop` output and operands + * @return a new instance of RandomCrop + * @see org.tensorflow.op.ImageOps.randomCrop + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomCrop( + image: Operand, + sizeOutput: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomCrop = java.randomCrop( + image, + sizeOutput, + *listOfNotNull( + seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } + ).toTypedArray() + ) + + /** + * Resize `images` to `size` using area interpolation. + * Input images can be of different types but output images are always float. + * + * The range of pixel values for the output image might be slightly different + * from the range for the input image because of limited numerical precision. + * To guarantee an output range, for example `[0.0, 1.0]`, apply + * `tf.clip_by_value` to the output. + * + * Each output pixel is computed by first transforming the pixel's footprint into + * the input tensor and then averaging the pixels that intersect the footprint. An + * input pixel's contribution to the average is weighted by the fraction of its + * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. 
+ * @param options carries optional attribute values + * @return a new instance of ResizeArea + * @see org.tensorflow.op.ImageOps.resizeArea + * @param alignCorners Sets the alignCorners option. + * + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + */ + public fun resizeArea( + images: Operand, + sizeOutput: Operand, + alignCorners: Boolean? = null + ): ResizeArea = java.resizeArea( + images, + sizeOutput, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } + ).toTypedArray() + ) + + /** + * Resize `images` to `size` using bicubic interpolation. + * Input images can be of different types but output images are always float. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param options carries optional attribute values + * @return a new instance of ResizeBicubic + * @see org.tensorflow.op.ImageOps.resizeBicubic + * @param alignCorners Sets the alignCorners option. + * + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. + */ + public fun resizeBicubic( + images: Operand, + sizeOutput: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? 
= null + ): ResizeBicubic = java.resizeBicubic( + images, + sizeOutput, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + ).toTypedArray() + ) + + /** + * Resize `images` to `size` using bilinear interpolation. + * Input images can be of different types but output images are always float. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param options carries optional attribute values + * @return a new instance of ResizeBilinear + * @see org.tensorflow.op.ImageOps.resizeBilinear + * @param alignCorners Sets the alignCorners option. + * + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. + */ + public fun resizeBilinear( + images: Operand, + sizeOutput: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeBilinear = java.resizeBilinear( + images, + sizeOutput, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + /** + * Resize `images` to `size` using nearest neighbor interpolation. + * + * @param data type for `resized_images` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. 
+ * @param options carries optional attribute values + * @param data type for `ResizeNearestNeighbor` output and operands + * @return a new instance of ResizeNearestNeighbor + * @see org.tensorflow.op.ImageOps.resizeNearestNeighbor + * @param alignCorners Sets the alignCorners option. + * + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. + */ + public fun resizeNearestNeighbor( + images: Operand, + sizeOutput: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + images, + sizeOutput, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + ).toTypedArray() + ) + + /** + * Converts one or more images from RGB to HSV. + * Outputs a tensor of the same shape as the `images` tensor, containing the HSV + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and + * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 + * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. + * + * Usage Example: + * ``` + * + * blue_image = tf.stack([ + * ... tf.zeros([5,5]), + * ... tf.zeros([5,5]), + * ... tf.ones([5,5])], + * ... axis=-1) + * blue_hsv_image = tf.image.rgb_to_hsv(blue_image) + * blue_hsv_image[0,0].numpy() + * array([0.6666667, 1. , 1. ], dtype=float32) + * ``` + * + * @param data type for `output` output + * @param images 1-D or higher rank. RGB data to convert. 
Last dimension must be size 3. + * @param data type for `RGBToHSV` output and operands + * @return a new instance of RgbToHsv + * @see org.tensorflow.op.ImageOps.rgbToHsv + */ + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + images + ) + + /** + * Generate a single randomly distorted bounding box for an image. + * Bounding box annotations are often supplied in addition to ground-truth labels + * in image recognition or object localization tasks. A common technique for + * training such a system is to randomly distort an image while preserving + * its content, i.e. _data augmentation_. This Op outputs a randomly distorted + * localization of an object, i.e. bounding box, given an `image_size`, + * `bounding_boxes` and a series of constraints. + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * what the bounding box looks like. + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * height of the underlying image. + * + * For example, + * ``` + * # Generate a single distorted bounding box. + * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + * tf.shape(image), + * bounding_boxes=bounding_boxes) + * + * # Draw the bounding box in an image summary. + * image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + * bbox_for_draw) + * tf.summary.image('images_with_box', image_with_box) + * + * # Employ the bounding box to distort the image. 
+ * distorted_image = tf.slice(image, begin, size) + * + * ``` + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * false and no bounding boxes are supplied, an error is raised. + * + * @param data type for `begin` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * associated with the image. + * @param minObjectCovered The cropped area of the image must contain at least this + * fraction of any bounding box supplied. The value of this parameter should be + * non-negative. In the case of 0, the cropped area does not need to overlap + * any of the bounding boxes supplied. + * @param options carries optional attribute values + * @param data type for `SampleDistortedBoundingBoxV2` output and operands + * @return a new instance of SampleDistortedBoundingBox + * @see org.tensorflow.op.ImageOps.sampleDistortedBoundingBox + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to non-zero, the random number + * generator is seeded by the given `seed`. Otherwise, it is seeded by a random + * seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + * @param aspectRatioRange Sets the aspectRatioRange option. + * + * @param aspectRatioRange The cropped area of the image must have an aspect ratio = + * width / height within this range. + * @return this Options instance. + * @param areaRange Sets the areaRange option. + * + * @param areaRange The cropped area of the image must contain a fraction of the + * supplied image within this range. + * @return this Options instance. + * @param maxAttempts Sets the maxAttempts option. 
+ * + * @param maxAttempts Number of attempts at generating a cropped region of the image + * of the specified constraints. After `max_attempts` failures, return the entire + * image. + * @return this Options instance. + * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. + * + * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. + * If true, assume an implicit bounding box covering the whole input. If false, + * raise an error. + * @return this Options instance. + */ + public fun sampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + seed: Long? = null, + seed2: Long? = null, + aspectRatioRange: List? = null, + areaRange: List? = null, + maxAttempts: Long? = null, + useImageIfNoBoundingBoxes: Boolean? = null + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + imageSize, + boundingBoxes, + minObjectCovered, + *listOfNotNull( + seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) + }, + areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } + ).toTypedArray() + ) + + /** + * The ScaleAndTranslate operation + * + * @param images The images value + * @param sizeOutput The sizeOutput value + * @param scale The scale value + * @param translation The translation value + * @param options carries optional attribute values + * @return a new instance of ScaleAndTranslate + * @see org.tensorflow.op.ImageOps.scaleAndTranslate + * @param kernelType Sets the kernelType option. 
+ * + * @param kernelType the kernelType option + * @return this Options instance. + * @param antialias Sets the antialias option. + * + * @param antialias the antialias option + * @return this Options instance. + */ + public fun scaleAndTranslate( + images: Operand, + sizeOutput: Operand, + scale: Operand, + translation: Operand, + kernelType: String? = null, + antialias: Boolean? = null + ): ScaleAndTranslate = java.scaleAndTranslate( + images, + sizeOutput, + scale, + translation, + *listOfNotNull( + kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + ).toTypedArray() + ) + + /** + * Generate a randomly distorted bounding box for an image deterministically. + * Bounding box annotations are often supplied in addition to ground-truth labels + * in image recognition or object localization tasks. A common technique for + * training such a system is to randomly distort an image while preserving its + * content, i.e. _data augmentation_. This Op, given the same `seed`, + * deterministically outputs a randomly distorted localization of an object, i.e. + * bounding box, given an `image_size`, `bounding_boxes` and a series of + * constraints. + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * what the bounding box looks like. + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * the height of the underlying image. 
+ * + * The output of this Op is guaranteed to be the same given the same `seed` and is + * independent of how many times the function is called, and independent of global + * seed settings (e.g. `tf.random.set_seed`). + * + * Example usage: + * ``` + * + * image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]) + * bbox = tf.constant( + * ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + * seed = (1, 2) + * **Generate a single distorted bounding box.** + * + * + * bbox_begin, bbox_size, bbox_draw = ( + * ... tf.image.stateless_sample_distorted_bounding_box( + * ... tf.shape(image), bounding_boxes=bbox, seed=seed)) + * **Employ the bounding box to distort the image.** + * + * + * tf.slice(image, bbox_begin, bbox_size) + * + * **Draw the bounding box in an image summary.** + * + * + * colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + * tf.image.draw_bounding_boxes( + * ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) + * + * ``` + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * false and no bounding boxes are supplied, an error is raised. + * + * @param data type for `begin` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * associated with the image. + * @param minObjectCovered The cropped area of the image must contain at least this + * fraction of any bounding box supplied. The value of this parameter should be + * non-negative. In the case of 0, the cropped area does not need to overlap + * any of the bounding boxes supplied. + * @param seed 1-D with shape `[2]`. The seed to the random number generator. Must have dtype + * `int32` or `int64`. (When using XLA, only `int32` is allowed.) 
+ * @param options carries optional attribute values + * @param data type for `StatelessSampleDistortedBoundingBox` output and operands + * @return a new instance of StatelessSampleDistortedBoundingBox + * @see org.tensorflow.op.ImageOps.statelessSampleDistortedBoundingBox + * @param aspectRatioRange Sets the aspectRatioRange option. + * + * @param aspectRatioRange The cropped area of the image must have an aspect ratio = + * width / height within this range. + * @return this Options instance. + * @param areaRange Sets the areaRange option. + * + * @param areaRange The cropped area of the image must contain a fraction of the + * supplied image within this range. + * @return this Options instance. + * @param maxAttempts Sets the maxAttempts option. + * + * @param maxAttempts Number of attempts at generating a cropped region of the image + * of the specified constraints. After `max_attempts` failures, return the entire + * image. + * @return this Options instance. + * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. + * + * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. + * If true, assume an implicit bounding box covering the whole input. If false, + * raise an error. + * @return this Options instance. + */ + public fun statelessSampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + seed: Operand, + aspectRatioRange: List? = null, + areaRange: List? = null, + maxAttempts: Long? = null, + useImageIfNoBoundingBoxes: Boolean? 
= null + ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( + imageSize, + boundingBoxes, + minObjectCovered, + seed, + *listOfNotNull( + aspectRatioRange?.let{ + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.aspectRatioRange(it) }, + areaRange?.let{ org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.maxAttempts(it) + }, + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) + } + ).toTypedArray() + ) + + /** + * Computes the gradient of the crop_and_resize op wrt the input image tensor. + * + * @param data type for `output` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. Both `image_height` and `image_width` need + * to be positive. 
+ * @param T The value of the T attribute + * @param options carries optional attribute values + * @param data type for `CropAndResizeGradImage` output and operands + * @return a new instance of CropAndResizeGradImage + * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method Sets the method option. + * + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + * @return this Options instance. + */ + @JvmName("cropAndResizeGradImageReified") + public inline fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + method: String? = null + ): CropAndResizeGradImage = cropAndResizeGradImage(grads, boxes, boxInd, imageSize, + T::class.java, method) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. + * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param data type for `image` output + * @param contents 0-D. The encoded image bytes. 
+ * @param dtype The desired DType of the returned Tensor. + * @param options carries optional attribute values + * @param data type for `DecodeImage` output and operands + * @return a new instance of DecodeImage + * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param expandAnimations Sets the expandAnimations option. + * + * @param expandAnimations Controls the output shape of the returned op. If True, the returned + * op will + * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all + * GIFs, whether animated or not. If, False, the returned op will produce a 3-D + * tensor for all file types and will truncate animated GIFs to the first frame. + * @return this Options instance. + */ + @JvmName("decodeImageReified") + public inline fun decodeImage( + contents: Operand, + channels: Long? = null, + expandAnimations: Boolean? = null + ): DecodeImage = decodeImage(contents, T::class.java, channels, expandAnimations) + + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
    + *
  • 0: Use the number of channels in the PNG-encoded image.
  • + *
  • 1: output a grayscale image.
  • + *
  • 3: output an RGB image.
  • + *
  • 4: output an RGBA image.
  • + *
+ * + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output + * @param contents 0-D. The PNG-encoded image. + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `DecodePng` output and operands + * @return a new instance of DecodePng + * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Sets the channels option. + * + * @param channels Number of color channels for the decoded image. + * @return this Options instance. + */ + @JvmName("decodePngReified") + public inline fun decodePng(contents: Operand, channels: Long? = + null): DecodePng = decodePng(contents, T::class.java, channels) + + /** + * Extract the shape information of a JPEG-encoded image. + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param data type for `image_shape` output + * @param contents 0-D. The JPEG-encoded image. + * @param outputType (Optional) The output type of the operation (int32 or int64). + * Defaults to int32. 
+ * @param data type for `ExtractJpegShape` output and operands + * @return a new instance of ExtractJpegShape + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ + @JvmName("extractJpegShapeReified") + public inline fun extractJpegShapeTyped(contents: Operand): + ExtractJpegShape = extractJpegShape(contents, T::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt new file mode 100644 index 00000000000..83b8aae959d --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -0,0 +1,1904 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.io.DecodeBase64 +import org.tensorflow.op.io.DecodeCompressed +import org.tensorflow.op.io.DecodeCsv +import org.tensorflow.op.io.DecodeJsonExample +import org.tensorflow.op.io.DecodePaddedRaw +import org.tensorflow.op.io.DecodeRaw +import org.tensorflow.op.io.DeserializeManySparse +import org.tensorflow.op.io.EncodeBase64 +import org.tensorflow.op.io.FifoQueue +import org.tensorflow.op.io.FixedLengthRecordReader +import org.tensorflow.op.io.IdentityReader +import org.tensorflow.op.io.LmdbReader +import org.tensorflow.op.io.MatchingFiles +import org.tensorflow.op.io.PaddingFifoQueue +import org.tensorflow.op.io.ParseExample +import org.tensorflow.op.io.ParseSequenceExample +import org.tensorflow.op.io.ParseSingleExample +import org.tensorflow.op.io.ParseSingleSequenceExample +import org.tensorflow.op.io.ParseTensor +import org.tensorflow.op.io.PriorityQueue +import org.tensorflow.op.io.QueueClose +import org.tensorflow.op.io.QueueDequeue +import org.tensorflow.op.io.QueueDequeueMany +import org.tensorflow.op.io.QueueDequeueUpTo +import org.tensorflow.op.io.QueueEnqueue +import org.tensorflow.op.io.QueueEnqueueMany +import org.tensorflow.op.io.QueueIsClosed +import org.tensorflow.op.io.QueueSize +import org.tensorflow.op.io.RandomShuffleQueue +import org.tensorflow.op.io.ReadFile +import org.tensorflow.op.io.ReaderNumRecordsProduced +import org.tensorflow.op.io.ReaderNumWorkUnitsCompleted +import org.tensorflow.op.io.ReaderRead +import org.tensorflow.op.io.ReaderReadUpTo +import org.tensorflow.op.io.ReaderReset +import org.tensorflow.op.io.ReaderRestoreState +import org.tensorflow.op.io.ReaderSerializeState +import org.tensorflow.op.io.SerializeManySparse +import org.tensorflow.op.io.SerializeSparse +import 
org.tensorflow.op.io.SerializeTensor +import org.tensorflow.op.io.ShardedFilename +import org.tensorflow.op.io.ShardedFilespec +import org.tensorflow.op.io.TextLineReader +import org.tensorflow.op.io.TfRecordReader +import org.tensorflow.op.io.WholeFileReader +import org.tensorflow.op.io.WriteFile +import org.tensorflow.types.TBool +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `io` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class IoOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.IoOps = ops.java.io + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Decode web-safe base64-encoded strings. + * Input may or may not have padding at the end. See EncodeBase64 for padding. + * Web-safe means that input must use - and _ instead of + and /. + * + * @param input Base64 strings to decode. + * @return a new instance of DecodeBase64 + * @see org.tensorflow.op.IoOps.decodeBase64 + */ + public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( + input + ) + + /** + * Decompress strings. + * This op decompresses each element of the `bytes` input `Tensor`, which + * is assumed to be compressed using the given `compression_type`. + * + * The `output` is a string `Tensor` of the same shape as `bytes`, + * each element containing the decompressed data from the corresponding + * element in `bytes`. + * + * @param bytes A Tensor of string which is compressed. + * @param options carries optional attribute values + * @return a new instance of DecodeCompressed + * @see org.tensorflow.op.IoOps.decodeCompressed + * @param compressionType Sets the compressionType option. 
+ * + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @return this Options instance. + */ + public fun decodeCompressed(bytes: Operand, compressionType: String? = null): + DecodeCompressed = java.decodeCompressed( + bytes, + *listOfNotNull( + compressionType?.let{ org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + ).toTypedArray() + ) + + /** + * Convert CSV records to tensors. Each column maps to one tensor. + * RFC 4180 format is expected for the CSV records. + * (https://tools.ietf.org/html/rfc4180) + * Note that we allow leading and trailing spaces with int or float field. + * + * @param records Each string is a record/row in the csv and all records should have + * the same format. + * @param recordDefaults One tensor per column of the input record, with either a + * scalar default value for that column or an empty vector if the column is + * required. + * @param options carries optional attribute values + * @return a new instance of DecodeCsv + * @see org.tensorflow.op.IoOps.decodeCsv + * @param fieldDelim Sets the fieldDelim option. + * + * @param fieldDelim char delimiter to separate fields in a record. + * @return this Options instance. + * @param useQuoteDelim Sets the useQuoteDelim option. + * + * @param useQuoteDelim If false, treats double quotation marks as regular + * characters inside of the string fields (ignoring RFC 4180, Section 2, + * Bullet 5). + * @return this Options instance. + * @param naValue Sets the naValue option. + * + * @param naValue Additional string to recognize as NA/NaN. + * @return this Options instance. + * @param selectCols Sets the selectCols option. + * + * @param selectCols the selectCols option + * @return this Options instance. + */ + public fun decodeCsv( + records: Operand, + recordDefaults: Iterable>, + fieldDelim: String? = null, + useQuoteDelim: Boolean? = null, + naValue: String? = null, + selectCols: List? 
= null + ): DecodeCsv = java.decodeCsv( + records, + recordDefaults, + *listOfNotNull( + fieldDelim?.let{ org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, + useQuoteDelim?.let{ org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, + naValue?.let{ org.tensorflow.op.io.DecodeCsv.naValue(it) }, + selectCols?.let{ org.tensorflow.op.io.DecodeCsv.selectCols(it) } + ).toTypedArray() + ) + + /** + * Convert JSON-encoded Example records to binary protocol buffer strings. + * Note: This is **not** a general purpose JSON parsing op. + * + * This op converts JSON-serialized + * `tf.train.Example` (created with `json_format.MessageToJson`, following the[standard JSON + * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json) ) + * to a binary-serialized `tf.train.Example` (equivalent to + * `Example.SerializeToString()`) suitable for conversion to tensors with + * `tf.io.parse_example`. + * + * @param jsonExamples Each string is a JSON object serialized according to the JSON + * mapping of the Example proto. + * @return a new instance of DecodeJsonExample + * @see org.tensorflow.op.IoOps.decodeJsonExample + */ + public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = + java.decodeJsonExample( + jsonExamples + ) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param data type for `output` output + * @param inputBytes Tensor of string to be decoded. + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple + * of the size of the output type. + * @param outType The value of the outType attribute + * @param options carries optional attribute values + * @param data type for `DecodePaddedRaw` output and operands + * @return a new instance of DecodePaddedRaw + * @see org.tensorflow.op.IoOps.decodePaddedRaw + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input `input_bytes` is in little-endian order. 
Ignored for + * `out_type` values that are stored in a single byte, like `uint8` + * @return this Options instance. + */ + public fun decodePaddedRaw( + inputBytes: Operand, + fixedLength: Operand, + outType: Class, + littleEndian: Boolean? = null + ): DecodePaddedRaw = java.decodePaddedRaw( + inputBytes, + fixedLength, + outType, + *listOfNotNull( + littleEndian?.let{ org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } + ).toTypedArray() + ) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param data type for `output` output + * @param bytes All the elements must have the same length. + * @param outType The value of the outType attribute + * @param options carries optional attribute values + * @param data type for `DecodeRaw` output and operands + * @return a new instance of DecodeRaw + * @see org.tensorflow.op.IoOps.decodeRaw + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. + * @return this Options instance. + */ + public fun decodeRaw( + bytes: Operand, + outType: Class, + littleEndian: Boolean? = null + ): DecodeRaw = java.decodeRaw( + bytes, + outType, + *listOfNotNull( + littleEndian?.let{ org.tensorflow.op.io.DecodeRaw.littleEndian(it) } + ).toTypedArray() + ) + + /** + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension). 
+ * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. + * Must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeManySparse` output and operands + * @return a new instance of DeserializeManySparse + * @see org.tensorflow.op.IoOps.deserializeManySparse + */ + public fun deserializeManySparse(serializedSparse: Operand, + dtype: Class): DeserializeManySparse = java.deserializeManySparse( + serializedSparse, + dtype + ) + + /** + * Encode strings into web-safe base64 format. + * Refer to the following article for more information on base64 format: + * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the + * end so that the encoded has length multiple of 4. See Padding section of the + * link above. + * + * Web-safe means that the encoder uses - and _ instead of + and /. + * + * @param input Strings to be encoded. 
+ * @param options carries optional attribute values + * @return a new instance of EncodeBase64 + * @see org.tensorflow.op.IoOps.encodeBase64 + * @param pad Sets the pad option. + * + * @param pad Bool whether padding is applied at the ends. + * @return this Options instance. + */ + public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = + java.encodeBase64( + input, + *listOfNotNull( + pad?.let{ org.tensorflow.op.io.EncodeBase64.pad(it) } + ).toTypedArray() + ) + + /** + * A queue that produces elements in first-in first-out order. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attribute values + * @return a new instance of FifoQueue + * @see org.tensorflow.op.IoOps.fifoQueue + * @param shapes Sets the shapes option. + * + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + * @return this Options instance. + */ + public fun fifoQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): FifoQueue = java.fifoQueue( + componentTypes, + *listOfNotNull( + shapes?.let{ org.tensorflow.op.io.FifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.FifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.FifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.FifoQueue.sharedName(it) } + ).toTypedArray() + ) + + /** + * A Reader that outputs fixed-length records from a file. + * + * @param recordBytes Number of bytes in the record. + * @param options carries optional attribute values + * @return a new instance of FixedLengthRecordReader + * @see org.tensorflow.op.IoOps.fixedLengthRecordReader + * @param headerBytes Sets the headerBytes option. + * + * @param headerBytes Number of bytes in the header, defaults to 0. + * @return this Options instance. + * @param footerBytes Sets the footerBytes option. + * + * @param footerBytes Number of bytes in the footer, defaults to 0. + * @return this Options instance. + * @param hopBytes Sets the hopBytes option. + * + * @param hopBytes Number of bytes to hop before each read. Default of 0 means using + * record_bytes. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + * @param encoding Sets the encoding option. + * + * @param encoding The type of encoding for the file. Currently ZLIB and GZIP + * are supported. Defaults to none. + * @return this Options instance. + */ + public fun fixedLengthRecordReader( + recordBytes: Long, + headerBytes: Long? = null, + footerBytes: Long? = null, + hopBytes: Long? = null, + container: String? 
= null, + sharedName: String? = null, + encoding: String? = null + ): FixedLengthRecordReader = java.fixedLengthRecordReader( + recordBytes, + *listOfNotNull( + headerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, + footerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, + hopBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, + container?.let{ org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, + encoding?.let{ org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } + ).toTypedArray() + ) + + /** + * A Reader that outputs the queued work as both the key and value. + * To use, enqueue strings in a Queue. ReaderRead will take the front + * work string and output (work, work). + * + * @param options carries optional attribute values + * @return a new instance of IdentityReader + * @see org.tensorflow.op.IoOps.identityReader + * + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader + = java.identityReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.IdentityReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.IdentityReader.sharedName(it) } + ).toTypedArray() + ) + + /** + * A Reader that outputs the records from a LMDB file. 
+ * + * @param options carries optional attribute values + * @return a new instance of LmdbReader + * @see org.tensorflow.op.IoOps.lmdbReader + * + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun lmdbReader(container: String? = null, sharedName: String? = null): LmdbReader = + java.lmdbReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.LmdbReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.LmdbReader.sharedName(it) } + ).toTypedArray() + ) + + /** + * Returns the set of files matching one or more glob patterns. + * Note that this routine only supports wildcard characters in the + * basename portion of the pattern, not in the directory portion. + * Note also that the order of filenames returned is deterministic. + * + * @param pattern Shell wildcard pattern(s). Scalar or vector of type string. + * @return a new instance of MatchingFiles + * @see org.tensorflow.op.IoOps.matchingFiles + */ + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( + pattern + ) + + /** + * A queue that produces elements in first-in first-out order. + * Variable-size shapes are allowed by setting the corresponding shape dimensions + * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum + * size of any given element in the minibatch. See below for details. + * + * @param componentTypes The type of each component in a value. 
+ * @param options carries optional attribute values + * @return a new instance of PaddingFifoQueue + * @see org.tensorflow.op.IoOps.paddingFifoQueue + * @param shapes Sets the shapes option. + * + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. + * Shapes of fixed rank but variable size are allowed by setting + * any shape dimension to -1. In this case, the inputs' shape may vary along + * the given dimension, and DequeueMany will pad the given dimension with + * zeros up to the maximum shape of all elements in the given batch. + * If the length of this attr is 0, different queue elements may have + * different ranks and shapes, but only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + * @return this Options instance. + */ + public fun paddingFifoQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): PaddingFifoQueue = java.paddingFifoQueue( + componentTypes, + *listOfNotNull( + shapes?.let{ org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PaddingFifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } + ).toTypedArray() + ) + + /** + * Transforms a vector of tf.Example protos (as strings) into typed tensors. + * + * @param serialized A scalar or vector containing binary serialized Example protos. + * @param names A tensor containing the names of the serialized protos. + * Corresponds 1:1 with the `serialized` tensor. + * May contain, for example, table key (descriptive) names for the + * corresponding serialized protos. These are purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty vector if no names are available. + * If non-empty, this tensor must have the same shape as "serialized". + * @param sparseKeys Vector of strings. + * The keys expected in the Examples' features associated with sparse values. + * @param denseKeys Vector of strings. + * The keys expected in the Examples' features associated with dense values. + * @param raggedKeys Vector of strings. + * The keys expected in the Examples' features associated with ragged values. + * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with + * `dense_keys`. + * dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. 
+ * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: + * the padding element. + * @param numSparse The number of sparse keys. + * @param sparseTypes A list of `num_sparse` types; the data types of data in each Feature + * given in sparse_keys. + * Currently the ParseExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param raggedValueTypes A list of `num_ragged` types; the data types of data in each Feature + * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * Currently the ParseExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param raggedSplitTypes A list of `num_ragged` types; the data types of row_splits in each + * Feature + * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * May be DT_INT32 or DT_INT64. + * @param denseShapes A list of `num_dense` shapes; the shapes of data in each Feature + * given in dense_keys (where `num_dense = dense_keys.size()`). + * The number of elements in the Feature corresponding to dense_key[j] + * must always equal dense_shapes[j].NumEntries(). + * If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + * Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + * The dense outputs are just the inputs row-stacked by batch. + * This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + * the shape of the output Tensor dense_values[j] will be + * (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + * of elements of length D1 * .... * DN, across all minibatch entries + * in the input. Any minibatch entry with less than M blocks of elements of + * length D1 * ... * DN will be padded with the corresponding default_value + * scalar element along the second dimension. 
+ * @return a new instance of ParseExample + * @see org.tensorflow.op.IoOps.parseExample + */ + public fun parseExample( + serialized: Operand, + names: Operand, + sparseKeys: Operand, + denseKeys: Operand, + raggedKeys: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseTypes: List>, + raggedValueTypes: List>, + raggedSplitTypes: List>, + denseShapes: List + ): ParseExample = java.parseExample( + serialized, + names, + sparseKeys, + denseKeys, + raggedKeys, + denseDefaults, + numSparse, + sparseTypes, + raggedValueTypes, + raggedSplitTypes, + denseShapes + ) + + /** + * Transforms a vector of tf.io.SequenceExample protos (as strings) into + * typed tensors. + * + * @param serialized A scalar or vector containing binary serialized SequenceExample protos. + * @param debugName A scalar or vector containing the names of the serialized protos. + * May contain, for example, table key (descriptive) name for the + * corresponding serialized proto. This is purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty vector if no name is available. + * @param contextSparseKeys The keys expected in the Examples' features associated with + * context_sparse + * values. + * @param contextDenseKeys The keys expected in the SequenceExamples' context features + * associated with + * dense values. + * @param contextRaggedKeys The keys expected in the Examples' features associated with + * context_ragged + * values. + * @param featureListSparseKeys The keys expected in the FeatureLists associated with sparse + * values. + * @param featureListDenseKeys The keys expected in the SequenceExamples' feature_lists + * associated + * with lists of dense values. + * @param featureListRaggedKeys The keys expected in the FeatureLists associated with ragged + * values. 
+ * @param featureListDenseMissingAssumedEmpty A vector corresponding 1:1 with + * feature_list_dense_keys, indicating which + * features may be missing from the SequenceExamples. If the associated + * FeatureList is missing, it is treated as empty. + * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. + * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. + * @param contextSparseTypes A list of Ncontext_sparse types; the data types of data in + * each context Feature given in context_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. + * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context features. + * @param featureListDenseTypes The value of the featureListDenseTypes attribute + * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types + * of data in each FeatureList given in feature_list_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param featureListRaggedValueTypes RaggedTensor.value dtypes for the ragged FeatureList + * features. + * @param featureListRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged FeatureList + * features. 
+ * @param options carries optional attribute values + * @return a new instance of ParseSequenceExample + * @see org.tensorflow.op.IoOps.parseSequenceExample + * @param NcontextSparse Sets the NcontextSparse option. + * + * @param NcontextSparse the NcontextSparse option + * @return this Options instance. + * @param contextDenseShapes Sets the contextDenseShapes option. + * + * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in + * each context Feature given in context_dense_keys. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). + * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * @return this Options instance. + * @param NfeatureListSparse Sets the NfeatureListSparse option. + * + * @param NfeatureListSparse the NfeatureListSparse option + * @return this Options instance. + * @param NfeatureListDense Sets the NfeatureListDense option. + * + * @param NfeatureListDense the NfeatureListDense option + * @return this Options instance. + * @param featureListDenseShapes Sets the featureListDenseShapes option. + * + * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of + * data in each FeatureList given in feature_list_dense_keys. + * The shape of each Feature in the FeatureList corresponding to + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). + * @return this Options instance. 
+ */ + public fun parseSequenceExample( + serialized: Operand, + debugName: Operand, + contextSparseKeys: Operand, + contextDenseKeys: Operand, + contextRaggedKeys: Operand, + featureListSparseKeys: Operand, + featureListDenseKeys: Operand, + featureListRaggedKeys: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextDenseDefaults: Iterable>, + contextSparseTypes: List>, + contextRaggedValueTypes: List>, + contextRaggedSplitTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + featureListRaggedValueTypes: List>, + featureListRaggedSplitTypes: List>, + NcontextSparse: Long? = null, + contextDenseShapes: List? = null, + NfeatureListSparse: Long? = null, + NfeatureListDense: Long? = null, + featureListDenseShapes: List? = null + ): ParseSequenceExample = java.parseSequenceExample( + serialized, + debugName, + contextSparseKeys, + contextDenseKeys, + contextRaggedKeys, + featureListSparseKeys, + featureListDenseKeys, + featureListRaggedKeys, + featureListDenseMissingAssumedEmpty, + contextDenseDefaults, + contextSparseTypes, + contextRaggedValueTypes, + contextRaggedSplitTypes, + featureListDenseTypes, + featureListSparseTypes, + featureListRaggedValueTypes, + featureListRaggedSplitTypes, + *listOfNotNull( + NcontextSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, + contextDenseShapes?.let{ org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, + NfeatureListSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, + featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) } + ).toTypedArray() + ) + + /** + * Transforms a tf.Example proto (as a string) into typed tensors. + * + * @param serialized A vector containing a batch of binary serialized Example protos. 
+ * @param denseDefaults A list of Tensors (some may be empty), whose length matches + * the length of `dense_keys`. dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. + * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: + * the padding element. + * @param numSparse The number of sparse features to be parsed from the example. This + * must match the lengths of `sparse_keys` and `sparse_types`. + * @param sparseKeys A list of `num_sparse` strings. + * The keys expected in the Examples' features associated with sparse values. + * @param denseKeys The keys expected in the Examples' features associated with dense + * values. + * @param sparseTypes A list of `num_sparse` types; the data types of data in each + * Feature given in sparse_keys. + * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param denseShapes The shapes of data in each Feature given in dense_keys. + * The length of this list must match the length of `dense_keys`. The + * number of elements in the Feature corresponding to dense_key[j] must + * always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == + * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + * will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + * ..., DN), the shape of the output Tensor dense_values[j] will be (M, + * D1, .., DN), where M is the number of blocks of elements of length + * D1 * .... * DN, in the input. 
+ * @return a new instance of ParseSingleExample + * @see org.tensorflow.op.IoOps.parseSingleExample + */ + public fun parseSingleExample( + serialized: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseKeys: List, + denseKeys: List, + sparseTypes: List>, + denseShapes: List + ): ParseSingleExample = java.parseSingleExample( + serialized, + denseDefaults, + numSparse, + sparseKeys, + denseKeys, + sparseTypes, + denseShapes + ) + + /** + * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. + * + * @param serialized A scalar containing a binary serialized SequenceExample proto. + * @param featureListDenseMissingAssumedEmpty A vector listing the + * FeatureList keys which may be missing from the SequenceExample. If the + * associated FeatureList is missing, it is treated as empty. By default, + * any FeatureList not listed in this vector must exist in the SequenceExample. + * @param contextSparseKeys A list of Ncontext_sparse string Tensors (scalars). + * The keys expected in the Examples' features associated with context_sparse + * values. + * @param contextDenseKeys A list of Ncontext_dense string Tensors (scalars). + * The keys expected in the SequenceExamples' context features associated with + * dense values. + * @param featureListSparseKeys A list of Nfeature_list_sparse string Tensors + * (scalars). The keys expected in the FeatureLists associated with sparse + * values. + * @param featureListDenseKeys A list of Nfeature_list_dense string Tensors (scalars). + * The keys expected in the SequenceExamples' feature_lists associated + * with lists of dense values. + * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. 
+ * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. + * @param debugName A scalar containing the name of the serialized proto. + * May contain, for example, table key (descriptive) name for the + * corresponding serialized proto. This is purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty scalar if no name is available. + * @param contextSparseTypes A list of Ncontext_sparse types; the data types of data in + * each context Feature given in context_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param featureListDenseTypes The value of the featureListDenseTypes attribute + * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types + * of data in each FeatureList given in feature_list_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param options carries optional attribute values + * @return a new instance of ParseSingleSequenceExample + * @see org.tensorflow.op.IoOps.parseSingleSequenceExample + * @param NcontextSparse Sets the NcontextSparse option. + * + * @param NcontextSparse the NcontextSparse option + * @return this Options instance. + * @param NcontextDense Sets the NcontextDense option. + * + * @param NcontextDense the NcontextDense option + * @return this Options instance. + * @param NfeatureListSparse Sets the NfeatureListSparse option. + * + * @param NfeatureListSparse the NfeatureListSparse option + * @return this Options instance. + * @param NfeatureListDense Sets the NfeatureListDense option. + * + * @param NfeatureListDense the NfeatureListDense option + * @return this Options instance. 
+ * @param contextDenseShapes Sets the contextDenseShapes option. + * + * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in + * each context Feature given in context_dense_keys. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). + * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * @return this Options instance. + * @param featureListDenseShapes Sets the featureListDenseShapes option. + * + * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of + * data in each FeatureList given in feature_list_dense_keys. + * The shape of each Feature in the FeatureList corresponding to + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). + * @return this Options instance. + */ + public fun parseSingleSequenceExample( + serialized: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextSparseKeys: Iterable>, + contextDenseKeys: Iterable>, + featureListSparseKeys: Iterable>, + featureListDenseKeys: Iterable>, + contextDenseDefaults: Iterable>, + debugName: Operand, + contextSparseTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + NcontextSparse: Long? = null, + NcontextDense: Long? = null, + NfeatureListSparse: Long? = null, + NfeatureListDense: Long? = null, + contextDenseShapes: List? = null, + featureListDenseShapes: List? 
= null + ): ParseSingleSequenceExample = java.parseSingleSequenceExample( + serialized, + featureListDenseMissingAssumedEmpty, + contextSparseKeys, + contextDenseKeys, + featureListSparseKeys, + featureListDenseKeys, + contextDenseDefaults, + debugName, + contextSparseTypes, + featureListDenseTypes, + featureListSparseTypes, + *listOfNotNull( + NcontextSparse?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NcontextSparse(it) }, + NcontextDense?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NcontextDense(it) }, + NfeatureListSparse?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListDense(it) + }, + contextDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) }, + featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) } + ).toTypedArray() + ) + + /** + * Transforms a serialized tensorflow.TensorProto proto into a Tensor. + * + * @param data type for `output` output + * @param serialized A scalar string containing a serialized TensorProto proto. + * @param outType The type of the serialized tensor. The provided type must match the + * type of the serialized tensor and no implicit conversion will take place. + * @param data type for `ParseTensor` output and operands + * @return a new instance of ParseTensor + * @see org.tensorflow.op.IoOps.parseTensor + */ + public fun parseTensor(serialized: Operand, outType: Class): + ParseTensor = java.parseTensor( + serialized, + outType + ) + + /** + * A queue that produces elements sorted by the first component value. + * Note that the PriorityQueue requires the first component of any element + * to be a scalar int64, in addition to the other elements declared by + * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue + * and DequeueMany) on a PriorityQueue will all require (resp. 
output) one extra + * entry in their input (resp. output) lists. + * + * @param componentTypes The type of each component in a value. + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. + * @param options carries optional attribute values + * @return a new instance of PriorityQueue + * @see org.tensorflow.op.IoOps.priorityQueue + * @param capacity Sets the capacity option. + * + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + * @return this Options instance. + */ + public fun priorityQueue( + componentTypes: List>, + shapes: List, + capacity: Long? = null, + container: String? = null, + sharedName: String? = null + ): PriorityQueue = java.priorityQueue( + componentTypes, + shapes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.io.PriorityQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PriorityQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PriorityQueue.sharedName(it) } + ).toTypedArray() + ) + + /** + * Closes the given queue. + * This operation signals that no more elements will be enqueued in the + * given queue. Subsequent Enqueue(Many) operations will fail. + * Subsequent Dequeue(Many) operations will continue to succeed if + * sufficient elements remain in the queue. 
Subsequent Dequeue(Many) + * operations that would block will fail immediately. + * + * @param handle The handle to a queue. + * @param options carries optional attribute values + * @return a new instance of QueueClose + * @see org.tensorflow.op.IoOps.queueClose + * @param cancelPendingEnqueues Sets the cancelPendingEnqueues option. + * + * @param cancelPendingEnqueues If true, all pending enqueue requests that are + * blocked on the given queue will be canceled. + * @return this Options instance. + */ + public fun queueClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): + QueueClose = java.queueClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let{ org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) + + /** + * Dequeues a tuple of one or more tensors from the given queue. + * This operation has k outputs, where k is the number of components + * in the tuples stored in the given queue, and output i is the ith + * component of the dequeued tuple. + * + * N.B. If the queue is empty, this operation will block until an element + * has been dequeued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attribute values + * @return a new instance of QueueDequeue + * @see org.tensorflow.op.IoOps.queueDequeue + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue is empty, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + * @return this Options instance. + */ + public fun queueDequeue( + handle: Operand, + componentTypes: List>, + timeoutMs: Long? 
= null + ): QueueDequeue = java.queueDequeue( + handle, + componentTypes, + *listOfNotNull( + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Dequeues `n` tuples of one or more tensors from the given queue. + * If the queue is closed and there are fewer than `n` elements, then an + * OutOfRange error is returned. + * + * This operation concatenates queue-element component tensors along the + * 0th dimension to make a single component tensor. All of the components + * in the dequeued tuple will have size `n` in the 0th dimension. + * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith + * component of the dequeued tuple. + * + * N.B. If the queue is empty, this operation will block until `n` elements + * have been dequeued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param n The number of tuples to dequeue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attribute values + * @return a new instance of QueueDequeueMany + * @see org.tensorflow.op.IoOps.queueDequeueMany + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue has fewer than n elements, this operation + * will block for up to timeout_ms milliseconds. + * Note: This option is not supported yet. + * @return this Options instance. + */ + public fun queueDequeueMany( + handle: Operand, + n: Operand, + componentTypes: List>, + timeoutMs: Long? = null + ): QueueDequeueMany = java.queueDequeueMany( + handle, + n, + componentTypes, + *listOfNotNull( + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Dequeues `n` tuples of one or more tensors from the given queue. + * This operation is not supported by all queues. 
If a queue does not support + * DequeueUpTo, then an Unimplemented error is returned. + * + * If the queue is closed and there are more than 0 but less than `n` + * elements remaining, then instead of returning an OutOfRange error like + * QueueDequeueMany, less than `n` elements are returned immediately. If + * the queue is closed and there are 0 elements left in the queue, then + * an OutOfRange error is returned just like in QueueDequeueMany. + * Otherwise the behavior is identical to QueueDequeueMany: + * + * This operation concatenates queue-element component tensors along the + * 0th dimension to make a single component tensor. All of the components + * in the dequeued tuple will have size n in the 0th dimension. + * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith + * component of the dequeued tuple. + * + * @param handle The handle to a queue. + * @param n The number of tuples to dequeue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attribute values + * @return a new instance of QueueDequeueUpTo + * @see org.tensorflow.op.IoOps.queueDequeueUpTo + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue has fewer than n elements, this operation + * will block for up to timeout_ms milliseconds. + * Note: This option is not supported yet. + * @return this Options instance. + */ + public fun queueDequeueUpTo( + handle: Operand, + n: Operand, + componentTypes: List>, + timeoutMs: Long? = null + ): QueueDequeueUpTo = java.queueDequeueUpTo( + handle, + n, + componentTypes, + *listOfNotNull( + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Enqueues a tuple of one or more tensors in the given queue. + * The components input has k elements, which correspond to the components of + * tuples stored in the given queue. + * + * N.B. 
If the queue is full, this operation will block until the given + * element has been enqueued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param components One or more tensors from which the enqueued tensors should be taken. + * @param options carries optional attribute values + * @return a new instance of QueueEnqueue + * @see org.tensorflow.op.IoOps.queueEnqueue + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue is full, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + * @return this Options instance. + */ + public fun queueEnqueue( + handle: Operand, + components: Iterable>, + timeoutMs: Long? = null + ): QueueEnqueue = java.queueEnqueue( + handle, + components, + *listOfNotNull( + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Enqueues zero or more tuples of one or more tensors in the given queue. + * This operation slices each component tensor along the 0th dimension to + * make multiple queue elements. All of the tuple components must have the + * same size in the 0th dimension. + * + * The components input has k elements, which correspond to the components of + * tuples stored in the given queue. + * + * N.B. If the queue is full, this operation will block until the given + * elements have been enqueued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param components One or more tensors from which the enqueued tensors should + * be taken. + * @param options carries optional attribute values + * @return a new instance of QueueEnqueueMany + * @see org.tensorflow.op.IoOps.queueEnqueueMany + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue is too full, this operation will block for up + * to timeout_ms milliseconds. + * Note: This option is not supported yet. 
+ * @return this Options instance. + */ + public fun queueEnqueueMany( + handle: Operand, + components: Iterable>, + timeoutMs: Long? = null + ): QueueEnqueueMany = java.queueEnqueueMany( + handle, + components, + *listOfNotNull( + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Returns true if queue is closed. + * This operation returns true if the queue is closed and false if the queue + * is open. + * + * @param handle The handle to a queue. + * @return a new instance of QueueIsClosed + * @see org.tensorflow.op.IoOps.queueIsClosed + */ + public fun queueIsClosed(handle: Operand): QueueIsClosed = java.queueIsClosed( + handle + ) + + /** + * Computes the number of elements in the given queue. + * + * @param handle The handle to a queue. + * @return a new instance of QueueSize + * @see org.tensorflow.op.IoOps.queueSize + */ + public fun queueSize(handle: Operand): QueueSize = java.queueSize( + handle + ) + + /** + * A queue that randomizes the order of elements. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attribute values + * @return a new instance of RandomShuffleQueue + * @see org.tensorflow.op.IoOps.randomShuffleQueue + * @param shapes Sets the shapes option. + * + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @return this Options instance. + * @param minAfterDequeue Sets the minAfterDequeue option. 
+ * + * @param minAfterDequeue Dequeue will block unless there would be this + * many elements after the dequeue or the queue is closed. This + * ensures a minimum level of mixing of elements. + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 is set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + * @return this Options instance. + */ + public fun randomShuffleQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + minAfterDequeue: Long? = null, + seed: Long? = null, + seed2: Long? = null, + container: String? = null, + sharedName: String? = null + ): RandomShuffleQueue = java.randomShuffleQueue( + componentTypes, + *listOfNotNull( + shapes?.let{ org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, + minAfterDequeue?.let{ org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, + seed?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, + seed2?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, + container?.let{ org.tensorflow.op.io.RandomShuffleQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } + ).toTypedArray() + ) + + /** + * Reads and outputs the entire contents of the input filename. 
+ * + * @param filename The filename value + * @return a new instance of ReadFile + * @see org.tensorflow.op.IoOps.readFile + */ + public fun readFile(filename: Operand): ReadFile = java.readFile( + filename + ) + + /** + * Returns the number of records this Reader has produced. + * This is the same as the number of ReaderRead executions that have + * succeeded. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderNumRecordsProduced + * @see org.tensorflow.op.IoOps.readerNumRecordsProduced + */ + public fun readerNumRecordsProduced(readerHandle: Operand): ReaderNumRecordsProduced + = java.readerNumRecordsProduced( + readerHandle + ) + + /** + * Returns the number of work units this Reader has finished processing. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderNumWorkUnitsCompleted + * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted + */ + public fun readerNumWorkUnitsCompleted(readerHandle: Operand): + ReaderNumWorkUnitsCompleted = java.readerNumWorkUnitsCompleted( + readerHandle + ) + + /** + * Returns the next record (key, value pair) produced by a Reader. + * Will dequeue from the input queue if necessary (e.g. when the + * Reader needs to start reading from a new file since it has finished + * with the previous file). + * + * @param readerHandle Handle to a Reader. + * @param queueHandle Handle to a Queue, with string work items. + * @return a new instance of ReaderRead + * @see org.tensorflow.op.IoOps.readerRead + */ + public fun readerRead(readerHandle: Operand, queueHandle: Operand): + ReaderRead = java.readerRead( + readerHandle, + queueHandle + ) + + /** + * Returns up to `num_records` (key, value) pairs produced by a Reader. + * Will dequeue from the input queue if necessary (e.g. when the + * Reader needs to start reading from a new file since it has finished + * with the previous file). + * It may return less than `num_records` even before the last batch. 
+ * + * @param readerHandle Handle to a `Reader`. + * @param queueHandle Handle to a `Queue`, with string work items. + * @param numRecords number of records to read from `Reader`. + * @return a new instance of ReaderReadUpTo + * @see org.tensorflow.op.IoOps.readerReadUpTo + */ + public fun readerReadUpTo( + readerHandle: Operand, + queueHandle: Operand, + numRecords: Operand + ): ReaderReadUpTo = java.readerReadUpTo( + readerHandle, + queueHandle, + numRecords + ) + + /** + * Restore a Reader to its initial clean state. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderReset + * @see org.tensorflow.op.IoOps.readerReset + */ + public fun readerReset(readerHandle: Operand): ReaderReset = java.readerReset( + readerHandle + ) + + /** + * Restore a reader to a previously saved state. + * Not all Readers support being restored, so this can produce an + * Unimplemented error. + * + * @param readerHandle Handle to a Reader. + * @param state Result of a ReaderSerializeState of a Reader with type + * matching reader_handle. + * @return a new instance of ReaderRestoreState + * @see org.tensorflow.op.IoOps.readerRestoreState + */ + public fun readerRestoreState(readerHandle: Operand, state: Operand): + ReaderRestoreState = java.readerRestoreState( + readerHandle, + state + ) + + /** + * Produce a string tensor that encodes the state of a Reader. + * Not all Readers support being serialized, so this can produce an + * Unimplemented error. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderSerializeState + * @see org.tensorflow.op.IoOps.readerSerializeState + */ + public fun readerSerializeState(readerHandle: Operand): ReaderSerializeState = + java.readerSerializeState( + readerHandle + ) + + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. 
Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @return a new instance of SerializeManySparse, with default output types + * @see org.tensorflow.op.IoOps.serializeManySparse + */ + public fun serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeManySparse = java.serializeManySparse( + sparseIndices, + sparseValues, + sparseShape + ) + + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. 
+ * @param data type for `SerializeManySparse` output and operands + * @return a new instance of SerializeManySparse + * @see org.tensorflow.op.IoOps.serializeManySparse + */ + public fun serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: Class + ): SerializeManySparse = java.serializeManySparse( + sparseIndices, + sparseValues, + sparseShape, + outType + ) + + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @return a new instance of SerializeSparse, with default output types + * @see org.tensorflow.op.IoOps.serializeSparse + */ + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeSparse = java.serializeSparse( + sparseIndices, + sparseValues, + sparseShape + ) + + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeSparse` output and operands + * @return a new instance of SerializeSparse + * @see org.tensorflow.op.IoOps.serializeSparse + */ + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: Class + ): SerializeSparse = java.serializeSparse( + sparseIndices, + sparseValues, + sparseShape, + outType + ) + + /** + * Transforms a Tensor into a serialized TensorProto proto. 
+ * + * @param tensor A Tensor of type `T`. + * @return a new instance of SerializeTensor + * @see org.tensorflow.op.IoOps.serializeTensor + */ + public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( + tensor + ) + + /** + * Generate a sharded filename. The filename is printf formatted as + * %s-%05d-of-%05d, basename, shard, num_shards. + * + * @param basename The basename value + * @param shard The shard value + * @param numShards The numShards value + * @return a new instance of ShardedFilename + * @see org.tensorflow.op.IoOps.shardedFilename + */ + public fun shardedFilename( + basename: Operand, + shard: Operand, + numShards: Operand + ): ShardedFilename = java.shardedFilename( + basename, + shard, + numShards + ) + + /** + * Generate a glob pattern matching all sharded file names. + * + * @param basename The basename value + * @param numShards The numShards value + * @return a new instance of ShardedFilespec + * @see org.tensorflow.op.IoOps.shardedFilespec + */ + public fun shardedFilespec(basename: Operand, numShards: Operand): + ShardedFilespec = java.shardedFilespec( + basename, + numShards + ) + + /** + * A Reader that outputs the lines of a file delimited by '\n'. + * + * @param options carries optional attribute values + * @return a new instance of TextLineReader + * @see org.tensorflow.op.IoOps.textLineReader + * + * @param skipHeaderLines Sets the skipHeaderLines option. + * + * @param skipHeaderLines Number of lines to skip from the beginning of every file. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. 
+ * @return this Options instance. + */ + public fun textLineReader( + skipHeaderLines: Long? = null, + container: String? = null, + sharedName: String? = null + ): TextLineReader = java.textLineReader( + *listOfNotNull( + skipHeaderLines?.let{ org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, + container?.let{ org.tensorflow.op.io.TextLineReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TextLineReader.sharedName(it) } + ).toTypedArray() + ) + + /** + * A Reader that outputs the records from a TensorFlow Records file. + * + * @param options carries optional attribute values + * @return a new instance of TfRecordReader + * @see org.tensorflow.op.IoOps.tfRecordReader + * + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + * @param compressionType Sets the compressionType option. + * + * @param compressionType the compressionType option + * @return this Options instance. + */ + public fun tfRecordReader( + container: String? = null, + sharedName: String? = null, + compressionType: String? = null + ): TfRecordReader = java.tfRecordReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.TfRecordReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TfRecordReader.sharedName(it) }, + compressionType?.let{ org.tensorflow.op.io.TfRecordReader.compressionType(it) } + ).toTypedArray() + ) + + /** + * A Reader that outputs the entire contents of a file as a value. + * To use, enqueue filenames in a Queue. The output of ReaderRead will + * be a filename (key) and the contents of that file (value). 
+ * + * @param options carries optional attribute values + * @return a new instance of WholeFileReader + * @see org.tensorflow.op.IoOps.wholeFileReader + * + * @param container Sets the container option. + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun wholeFileReader(container: String? = null, sharedName: String? = null): + WholeFileReader = java.wholeFileReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.WholeFileReader.sharedName(it) } + ).toTypedArray() + ) + + /** + * Writes contents to the file at input filename. Creates file and recursively + * creates directory if not existing. + * + * @param filename scalar. The name of the file to which we write the contents. + * @param contents scalar. The content to be written to the output file. + * @return a new instance of WriteFile + * @see org.tensorflow.op.IoOps.writeFile + */ + public fun writeFile(filename: Operand, contents: Operand): WriteFile = + java.writeFile( + filename, + contents + ) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param data type for `output` output + * @param inputBytes Tensor of string to be decoded. + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple + * of the size of the output type. 
+ * @param outType The value of the outType attribute + * @param options carries optional attribute values + * @param data type for `DecodePaddedRaw` output and operands + * @return a new instance of DecodePaddedRaw + * @see org.tensorflow.op.IoOps.decodePaddedRaw + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for + * `out_type` values that are stored in a single byte, like `uint8` + * @return this Options instance. + */ + @JvmName("decodePaddedRawReified") + public inline fun decodePaddedRaw( + inputBytes: Operand, + fixedLength: Operand, + littleEndian: Boolean? = null + ): DecodePaddedRaw = decodePaddedRaw(inputBytes, fixedLength, T::class.java, littleEndian) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param data type for `output` output + * @param bytes All the elements must have the same length. + * @param outType The value of the outType attribute + * @param options carries optional attribute values + * @param data type for `DecodeRaw` output and operands + * @return a new instance of DecodeRaw + * @see org.tensorflow.op.IoOps.decodeRaw + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. + * @return this Options instance. + */ + @JvmName("decodeRawReified") + public inline fun decodeRaw(bytes: Operand, littleEndian: Boolean? + = null): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) + + /** + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. 
When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. + * Must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeManySparse` output and operands + * @return a new instance of DeserializeManySparse + * @see org.tensorflow.op.IoOps.deserializeManySparse + */ + @JvmName("deserializeManySparseReified") + public inline fun deserializeManySparse(serializedSparse: Operand): + DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) + + /** + * Transforms a serialized tensorflow.TensorProto proto into a Tensor. + * + * @param data type for `output` output + * @param serialized A scalar string containing a serialized TensorProto proto. + * @param outType The type of the serialized tensor. 
The provided type must match the + * type of the serialized tensor and no implicit conversion will take place. + * @param data type for `ParseTensor` output and operands + * @return a new instance of ParseTensor + * @see org.tensorflow.op.IoOps.parseTensor + */ + @JvmName("parseTensorReified") + public inline fun parseTensor(serialized: Operand): ParseTensor + = parseTensor(serialized, T::class.java) + + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeManySparse` output and operands + * @return a new instance of SerializeManySparse + * @see org.tensorflow.op.IoOps.serializeManySparse + */ + @JvmName("serializeManySparseReified") + public inline fun serializeManySparseTyped( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, + U::class.java) + + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. 
+ * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeSparse` output and operands + * @return a new instance of SerializeSparse + * @see org.tensorflow.op.IoOps.serializeSparse + */ + @JvmName("serializeSparseReified") + public inline fun serializeSparseTyped( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, + U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt new file mode 100644 index 00000000000..70b1a7d1324 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -0,0 +1,13102 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import java.nio.charset.Charset +import kotlin.Array +import kotlin.Boolean +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.ConcreteFunction +import org.tensorflow.Operand +import org.tensorflow.ndarray.BooleanNdArray +import org.tensorflow.ndarray.ByteNdArray +import org.tensorflow.ndarray.DoubleNdArray +import org.tensorflow.ndarray.FloatNdArray +import org.tensorflow.ndarray.IntNdArray +import org.tensorflow.ndarray.LongNdArray +import org.tensorflow.ndarray.NdArray +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.buffer.BooleanDataBuffer +import org.tensorflow.ndarray.buffer.ByteDataBuffer +import org.tensorflow.ndarray.buffer.DataBuffer +import org.tensorflow.ndarray.buffer.DoubleDataBuffer +import org.tensorflow.ndarray.buffer.FloatDataBuffer +import org.tensorflow.ndarray.buffer.IntDataBuffer +import org.tensorflow.ndarray.buffer.LongDataBuffer +import org.tensorflow.ndarray.index.Index +import org.tensorflow.op.Ops +import org.tensorflow.op.Scope +import org.tensorflow.op.core.Abort +import org.tensorflow.op.core.All +import org.tensorflow.op.core.Any +import org.tensorflow.op.core.AssertThat +import org.tensorflow.op.core.Assign +import org.tensorflow.op.core.AssignAdd +import org.tensorflow.op.core.AssignAddVariableOp +import org.tensorflow.op.core.AssignSub +import org.tensorflow.op.core.AssignSubVariableOp +import org.tensorflow.op.core.AssignVariableOp +import org.tensorflow.op.core.Barrier +import org.tensorflow.op.core.BarrierClose +import org.tensorflow.op.core.BarrierIncompleteSize +import org.tensorflow.op.core.BarrierInsertMany +import org.tensorflow.op.core.BarrierReadySize +import 
org.tensorflow.op.core.BarrierTakeMany +import org.tensorflow.op.core.Batch +import org.tensorflow.op.core.BatchFunction +import org.tensorflow.op.core.BatchToSpace +import org.tensorflow.op.core.BatchToSpaceNd +import org.tensorflow.op.core.Bitcast +import org.tensorflow.op.core.BroadcastDynamicShape +import org.tensorflow.op.core.BroadcastTo +import org.tensorflow.op.core.Bucketize +import org.tensorflow.op.core.Case +import org.tensorflow.op.core.ClipByValue +import org.tensorflow.op.core.Concat +import org.tensorflow.op.core.Constant +import org.tensorflow.op.core.ConsumeMutexLock +import org.tensorflow.op.core.ControlTrigger +import org.tensorflow.op.core.CountUpTo +import org.tensorflow.op.core.DecodeProto +import org.tensorflow.op.core.DeepCopy +import org.tensorflow.op.core.DeleteSessionTensor +import org.tensorflow.op.core.DestroyResourceOp +import org.tensorflow.op.core.DestroyTemporaryVariable +import org.tensorflow.op.core.DynamicPartition +import org.tensorflow.op.core.DynamicStitch +import org.tensorflow.op.core.EditDistance +import org.tensorflow.op.core.Empty +import org.tensorflow.op.core.EmptyTensorList +import org.tensorflow.op.core.EmptyTensorMap +import org.tensorflow.op.core.EncodeProto +import org.tensorflow.op.core.EnsureShape +import org.tensorflow.op.core.ExpandDims +import org.tensorflow.op.core.ExtractVolumePatches +import org.tensorflow.op.core.Fill +import org.tensorflow.op.core.Fingerprint +import org.tensorflow.op.core.For +import org.tensorflow.op.core.Gather +import org.tensorflow.op.core.GatherNd +import org.tensorflow.op.core.GetSessionHandle +import org.tensorflow.op.core.GetSessionTensor +import org.tensorflow.op.core.Gradients +import org.tensorflow.op.core.GuaranteeConst +import org.tensorflow.op.core.HashTable +import org.tensorflow.op.core.HistogramFixedWidth +import org.tensorflow.op.core.Identity +import org.tensorflow.op.core.IdentityN +import org.tensorflow.op.core.If +import org.tensorflow.op.core.ImmutableConst 
+import org.tensorflow.op.core.InitializeTable +import org.tensorflow.op.core.InitializeTableFromTextFile +import org.tensorflow.op.core.InplaceAdd +import org.tensorflow.op.core.InplaceSub +import org.tensorflow.op.core.InplaceUpdate +import org.tensorflow.op.core.IsVariableInitialized +import org.tensorflow.op.core.KthOrderStatistic +import org.tensorflow.op.core.LookupTableExport +import org.tensorflow.op.core.LookupTableFind +import org.tensorflow.op.core.LookupTableImport +import org.tensorflow.op.core.LookupTableInsert +import org.tensorflow.op.core.LookupTableSize +import org.tensorflow.op.core.LoopCond +import org.tensorflow.op.core.MakeUnique +import org.tensorflow.op.core.MapClear +import org.tensorflow.op.core.MapIncompleteSize +import org.tensorflow.op.core.MapPeek +import org.tensorflow.op.core.MapSize +import org.tensorflow.op.core.MapStage +import org.tensorflow.op.core.MapUnstage +import org.tensorflow.op.core.MapUnstageNoKey +import org.tensorflow.op.core.Max +import org.tensorflow.op.core.Merge +import org.tensorflow.op.core.Min +import org.tensorflow.op.core.MirrorPad +import org.tensorflow.op.core.MlirPassthroughOp +import org.tensorflow.op.core.MutableDenseHashTable +import org.tensorflow.op.core.MutableHashTable +import org.tensorflow.op.core.MutableHashTableOfTensors +import org.tensorflow.op.core.Mutex +import org.tensorflow.op.core.MutexLock +import org.tensorflow.op.core.NextIteration +import org.tensorflow.op.core.NoOp +import org.tensorflow.op.core.OneHot +import org.tensorflow.op.core.Ones +import org.tensorflow.op.core.OnesLike +import org.tensorflow.op.core.OrderedMapClear +import org.tensorflow.op.core.OrderedMapIncompleteSize +import org.tensorflow.op.core.OrderedMapPeek +import org.tensorflow.op.core.OrderedMapSize +import org.tensorflow.op.core.OrderedMapStage +import org.tensorflow.op.core.OrderedMapUnstage +import org.tensorflow.op.core.OrderedMapUnstageNoKey +import org.tensorflow.op.core.Pad +import 
org.tensorflow.op.core.ParallelConcat +import org.tensorflow.op.core.ParallelDynamicStitch +import org.tensorflow.op.core.PartitionedCall +import org.tensorflow.op.core.Placeholder +import org.tensorflow.op.core.PlaceholderWithDefault +import org.tensorflow.op.core.Print +import org.tensorflow.op.core.Prod +import org.tensorflow.op.core.QuantizedReshape +import org.tensorflow.op.core.Range +import org.tensorflow.op.core.Rank +import org.tensorflow.op.core.ReadVariableOp +import org.tensorflow.op.core.ReduceAll +import org.tensorflow.op.core.ReduceAny +import org.tensorflow.op.core.ReduceMax +import org.tensorflow.op.core.ReduceMin +import org.tensorflow.op.core.ReduceProd +import org.tensorflow.op.core.ReduceSum +import org.tensorflow.op.core.RefNextIteration +import org.tensorflow.op.core.RefSelect +import org.tensorflow.op.core.RefSwitch +import org.tensorflow.op.core.RemoteCall +import org.tensorflow.op.core.Reshape +import org.tensorflow.op.core.ResourceCountUpTo +import org.tensorflow.op.core.ResourceGather +import org.tensorflow.op.core.ResourceGatherNd +import org.tensorflow.op.core.ResourceScatterAdd +import org.tensorflow.op.core.ResourceScatterDiv +import org.tensorflow.op.core.ResourceScatterMax +import org.tensorflow.op.core.ResourceScatterMin +import org.tensorflow.op.core.ResourceScatterMul +import org.tensorflow.op.core.ResourceScatterNdAdd +import org.tensorflow.op.core.ResourceScatterNdMax +import org.tensorflow.op.core.ResourceScatterNdMin +import org.tensorflow.op.core.ResourceScatterNdSub +import org.tensorflow.op.core.ResourceScatterNdUpdate +import org.tensorflow.op.core.ResourceScatterSub +import org.tensorflow.op.core.ResourceScatterUpdate +import org.tensorflow.op.core.ResourceStridedSliceAssign +import org.tensorflow.op.core.Reverse +import org.tensorflow.op.core.ReverseSequence +import org.tensorflow.op.core.Roll +import org.tensorflow.op.core.ScatterAdd +import org.tensorflow.op.core.ScatterDiv +import org.tensorflow.op.core.ScatterMax 
+import org.tensorflow.op.core.ScatterMin +import org.tensorflow.op.core.ScatterMul +import org.tensorflow.op.core.ScatterNd +import org.tensorflow.op.core.ScatterNdAdd +import org.tensorflow.op.core.ScatterNdNonAliasingAdd +import org.tensorflow.op.core.ScatterNdSub +import org.tensorflow.op.core.ScatterNdUpdate +import org.tensorflow.op.core.ScatterSub +import org.tensorflow.op.core.ScatterUpdate +import org.tensorflow.op.core.Select +import org.tensorflow.op.core.SetDiff1d +import org.tensorflow.op.core.SetSize +import org.tensorflow.op.core.ShapeN +import org.tensorflow.op.core.Size +import org.tensorflow.op.core.Skipgram +import org.tensorflow.op.core.Slice +import org.tensorflow.op.core.Snapshot +import org.tensorflow.op.core.SpaceToBatchNd +import org.tensorflow.op.core.Split +import org.tensorflow.op.core.SplitV +import org.tensorflow.op.core.Squeeze +import org.tensorflow.op.core.Stack +import org.tensorflow.op.core.Stage +import org.tensorflow.op.core.StageClear +import org.tensorflow.op.core.StagePeek +import org.tensorflow.op.core.StageSize +import org.tensorflow.op.core.StatefulCase +import org.tensorflow.op.core.StatefulIf +import org.tensorflow.op.core.StatefulPartitionedCall +import org.tensorflow.op.core.StatefulWhile +import org.tensorflow.op.core.StatelessIf +import org.tensorflow.op.core.StatelessPartitionedCall +import org.tensorflow.op.core.StatelessWhile +import org.tensorflow.op.core.StopGradient +import org.tensorflow.op.core.StridedSlice +import org.tensorflow.op.core.StridedSliceAssign +import org.tensorflow.op.core.StridedSliceGrad +import org.tensorflow.op.core.Sum +import org.tensorflow.op.core.SwitchCond +import org.tensorflow.op.core.TemporaryVariable +import org.tensorflow.op.core.TensorArray +import org.tensorflow.op.core.TensorArrayClose +import org.tensorflow.op.core.TensorArrayConcat +import org.tensorflow.op.core.TensorArrayGather +import org.tensorflow.op.core.TensorArrayGrad +import 
org.tensorflow.op.core.TensorArrayGradWithShape +import org.tensorflow.op.core.TensorArrayPack +import org.tensorflow.op.core.TensorArrayRead +import org.tensorflow.op.core.TensorArrayScatter +import org.tensorflow.op.core.TensorArraySize +import org.tensorflow.op.core.TensorArraySplit +import org.tensorflow.op.core.TensorArrayUnpack +import org.tensorflow.op.core.TensorArrayWrite +import org.tensorflow.op.core.TensorListConcat +import org.tensorflow.op.core.TensorListConcatLists +import org.tensorflow.op.core.TensorListElementShape +import org.tensorflow.op.core.TensorListFromTensor +import org.tensorflow.op.core.TensorListGather +import org.tensorflow.op.core.TensorListGetItem +import org.tensorflow.op.core.TensorListLength +import org.tensorflow.op.core.TensorListPopBack +import org.tensorflow.op.core.TensorListPushBack +import org.tensorflow.op.core.TensorListPushBackBatch +import org.tensorflow.op.core.TensorListReserve +import org.tensorflow.op.core.TensorListResize +import org.tensorflow.op.core.TensorListScatter +import org.tensorflow.op.core.TensorListScatterIntoExistingList +import org.tensorflow.op.core.TensorListSetItem +import org.tensorflow.op.core.TensorListSplit +import org.tensorflow.op.core.TensorListStack +import org.tensorflow.op.core.TensorMapErase +import org.tensorflow.op.core.TensorMapHasKey +import org.tensorflow.op.core.TensorMapInsert +import org.tensorflow.op.core.TensorMapLookup +import org.tensorflow.op.core.TensorMapSize +import org.tensorflow.op.core.TensorMapStackKeys +import org.tensorflow.op.core.TensorScatterNdAdd +import org.tensorflow.op.core.TensorScatterNdMax +import org.tensorflow.op.core.TensorScatterNdMin +import org.tensorflow.op.core.TensorScatterNdSub +import org.tensorflow.op.core.TensorScatterNdUpdate +import org.tensorflow.op.core.TensorStridedSliceUpdate +import org.tensorflow.op.core.Tile +import org.tensorflow.op.core.Timestamp +import org.tensorflow.op.core.TopKUnique +import org.tensorflow.op.core.TopKWithUnique 
+import org.tensorflow.op.core.Unbatch +import org.tensorflow.op.core.UnbatchGrad +import org.tensorflow.op.core.Unique +import org.tensorflow.op.core.UniqueWithCounts +import org.tensorflow.op.core.UnravelIndex +import org.tensorflow.op.core.Unstack +import org.tensorflow.op.core.Unstage +import org.tensorflow.op.core.VarHandleOp +import org.tensorflow.op.core.VarIsInitializedOp +import org.tensorflow.op.core.Variable +import org.tensorflow.op.core.VariableShape +import org.tensorflow.op.core.Where +import org.tensorflow.op.core.While +import org.tensorflow.op.core.Zeros +import org.tensorflow.op.core.ZerosLike +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.TUint8 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building operations as [Op][org.tensorflow.op.Op]s + * + * @see Ops + */ +public class KotlinOps( + /** + * Returns the java counterpart of this API + */ + public override val java: Ops +) : OpsBase() { + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = java.scope() + + public val nn: NnOps = NnOps(this) + + public val summary: SummaryOps = SummaryOps(this) + + public val image: ImageOps = ImageOps(this) + + public val ragged: RaggedOps = RaggedOps(this) + + public val `data`: DataOps = DataOps(this) + + public val shape: ShapeOps = ShapeOps(this) + + public val io: IoOps = IoOps(this) + + public val dtypes: DtypesOps = DtypesOps(this) + + public val xla: XlaOps = XlaOps(this) + + public val linalg: LinalgOps = LinalgOps(this) + + public val random: RandomOps = RandomOps(this) + + public val strings: StringsOps = StringsOps(this) + + public val sparse: SparseOps = SparseOps(this) + + public val bitwise: BitwiseOps = BitwiseOps(this) + + public val tpu: TpuOps = 
TpuOps(this) + + public val audio: AudioOps = AudioOps(this) + + public val math: MathOps = MathOps(this) + + public val signal: SignalOps = SignalOps(this) + + public val quantization: QuantizationOps = QuantizationOps(this) + + public val train: TrainOps = TrainOps(this) + + /** + * Raise a exception to abort the process when called. + * If exit_without_error is true, the process will exit normally, + * otherwise it will exit with a SIGABORT signal. + * + * Returns nothing but an exception. + * + * @param options carries optional attribute values + * @return a new instance of Abort + * @see org.tensorflow.op.Ops.abort + * + * @param errorMsg Sets the errorMsg option. + * + * @param errorMsg A string which is the message associated with the exception. + * @return this Options instance. + * @param exitWithoutError Sets the exitWithoutError option. + * + * @param exitWithoutError the exitWithoutError option + * @return this Options instance. + */ + public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = + java.abort( + *listOfNotNull( + errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() + ) + + /** + * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @return a new instance of All + * @see org.tensorflow.op.Ops.all + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. 
+ */ + public fun all( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): All = java.all( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the "logical or" of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @return a new instance of Any + * @see org.tensorflow.op.Ops.any + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun any( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Any = java.any( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } + ).toTypedArray() + ) + + /** + * Creates a constant of `String` elements, using the default UTF-8 charset. + * + * @param data An array containing the values to put into the new constant. + * @return the `String` constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: String): Constant = java.array( + *data + ) + + /** + * Creates a constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. + * @return a float constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Int): Constant = java.array( + *data + ) + + /** + * Creates a constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. 
+ * @return a double constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Double): Constant = java.array( + *data + ) + + /** + * Creates a constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. + * @return a long constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Long): Constant = java.array( + *data + ) + + /** + * Creates a constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. + * @return a byte constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Byte): Constant = java.array( + *data + ) + + /** + * Creates a constant of `boolean` elements. + * + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Boolean): Constant = java.array( + *data + ) + + /** + * Creates a constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. + * @return a float constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(vararg `data`: Float): Constant = java.array( + *data + ) + + /** + * Creates a constant of `String` elements, using the given charset. + * + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the `String` constant + * @see org.tensorflow.op.Ops.array + */ + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + charset, + *data + ) + + /** + * Asserts that the given condition is true. + * If `condition` evaluates to false, print the list of tensors in `data`. + * `summarize` determines how many entries of the tensors to print. 
+ * + * @param condition The condition to evaluate. + * @param data The tensors to print out when condition is false. + * @param options carries optional attribute values + * @return a new instance of AssertThat + * @see org.tensorflow.op.Ops.assertThat + * @param summarize Sets the summarize option. + * + * @param summarize Print this many entries of each tensor. + * @return this Options instance. + */ + public fun assertThat( + condition: Operand, + `data`: Iterable>, + summarize: Long? = null + ): AssertThat = java.assertThat( + condition, + data, + *listOfNotNull( + summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } + ).toTypedArray() + ) + + /** + * Update 'ref' by assigning 'value' to it. + * This operation outputs "ref" after the assignment is done. + * This makes it easier to chain operations that need to use the reset value. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. May be uninitialized. + * @param value The value to be assigned to the variable. + * @param options carries optional attribute values + * @param data type for `Assign` output and operands + * @return a new instance of Assign + * @see org.tensorflow.op.Ops.assign + * @param validateShape Sets the validateShape option. + * + * @param validateShape If true, the operation will validate that the shape + * of 'value' matches the shape of the Tensor being assigned to. If false, + * 'ref' will take on the shape of 'value'. + * @return this Options instance. + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the assignment will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun assign( + ref: Operand, + value: Operand, + validateShape: Boolean? = null, + useLocking: Boolean? 
= null + ): Assign = java.assign( + ref, + value, + *listOfNotNull( + validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update 'ref' by adding 'value' to it. + * This operation outputs "ref" after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param value The value to be added to the variable. + * @param options carries optional attribute values + * @param data type for `AssignAdd` output and operands + * @return a new instance of AssignAdd + * @see org.tensorflow.op.Ops.assignAdd + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the addition will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun assignAdd( + ref: Operand, + value: Operand, + useLocking: Boolean? = null + ): AssignAdd = java.assignAdd( + ref, + value, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } + ).toTypedArray() + ) + + /** + * Adds a value to the current value of a variable. + * Any ReadVariableOp with a control dependency on this op is guaranteed to + * see the incremented value or a subsequent newer one. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value by which the variable will be incremented. + * @return a new instance of AssignAddVariableOp + * @see org.tensorflow.op.Ops.assignAddVariableOp + */ + public fun assignAddVariableOp(resource: Operand, value: Operand): + AssignAddVariableOp = java.assignAddVariableOp( + resource, + value + ) + + /** + * Update 'ref' by subtracting 'value' from it. + * This operation outputs "ref" after the update is done. 
+ * This makes it easier to chain operations that need to use the reset value. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param value The value to be subtracted to the variable. + * @param options carries optional attribute values + * @param data type for `AssignSub` output and operands + * @return a new instance of AssignSub + * @see org.tensorflow.op.Ops.assignSub + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun assignSub( + ref: Operand, + value: Operand, + useLocking: Boolean? = null + ): AssignSub = java.assignSub( + ref, + value, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } + ).toTypedArray() + ) + + /** + * Subtracts a value from the current value of a variable. + * Any ReadVariableOp with a control dependency on this op is guaranteed to + * see the decremented value or a subsequent newer one. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value by which the variable will be incremented. + * @return a new instance of AssignSubVariableOp + * @see org.tensorflow.op.Ops.assignSubVariableOp + */ + public fun assignSubVariableOp(resource: Operand, value: Operand): + AssignSubVariableOp = java.assignSubVariableOp( + resource, + value + ) + + /** + * Assigns a new value to a variable. + * Any ReadVariableOp with a control dependency on this op is guaranteed to return + * this value or a subsequent newer value of the variable. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value to set the new tensor to use. 
+ * @return a new instance of AssignVariableOp + * @see org.tensorflow.op.Ops.assignVariableOp + */ + public fun assignVariableOp(resource: Operand, value: Operand): + AssignVariableOp = java.assignVariableOp( + resource, + value + ) + + /** + * Defines a barrier that persists across different graph executions. + * A barrier represents a key-value map, where each key is a string, and + * each value is a tuple of tensors. + * + * At runtime, the barrier contains 'complete' and 'incomplete' + * elements. A complete element has defined tensors for all components of + * its value tuple, and may be accessed using BarrierTakeMany. An + * incomplete element has some undefined components in its value tuple, + * and may be updated using BarrierInsertMany. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attribute values + * @return a new instance of Barrier + * @see org.tensorflow.op.Ops.barrier + * @param shapes Sets the shapes option. + * + * @param shapes The shape of each component in a value. Each shape must be 1 in the + * first dimension. The length of this attr must be the same as the length of + * component_types. + * @return this Options instance. + * @param capacity Sets the capacity option. + * + * @param capacity The capacity of the barrier. The default capacity is MAX_INT32, + * which is the largest capacity of the underlying queue. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this barrier is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this barrier will be shared under the given name + * across multiple sessions. + * @return this Options instance. + */ + public fun barrier( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? 
= null, + sharedName: String? = null + ): Barrier = java.barrier( + componentTypes, + *listOfNotNull( + shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let{ org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } + ).toTypedArray() + ) + + /** + * Closes the given barrier. + * This operation signals that no more new elements will be inserted in the + * given barrier. Subsequent InsertMany that try to introduce a new key will fail. + * Subsequent InsertMany operations that just add missing components to already + * existing elements will continue to succeed. Subsequent TakeMany operations will + * continue to succeed if sufficient completed elements remain in the barrier. + * Subsequent TakeMany operations that would block will fail immediately. + * + * @param handle The handle to a barrier. + * @param options carries optional attribute values + * @return a new instance of BarrierClose + * @see org.tensorflow.op.Ops.barrierClose + * @param cancelPendingEnqueues Sets the cancelPendingEnqueues option. + * + * @param cancelPendingEnqueues If true, all pending enqueue requests that are + * blocked on the barrier's queue will be canceled. InsertMany will fail, even + * if no new key is introduced. + * @return this Options instance. + */ + public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): + BarrierClose = java.barrierClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) + + /** + * Computes the number of incomplete elements in the given barrier. + * + * @param handle The handle to a barrier. 
+ * @return a new instance of BarrierIncompleteSize + * @see org.tensorflow.op.Ops.barrierIncompleteSize + */ + public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = + java.barrierIncompleteSize( + handle + ) + + /** + * For each key, assigns the respective value to the specified component. + * If a key is not found in the barrier, this operation will create a new + * incomplete element. If a key is found in the barrier, and the element + * already has a value at component_index, this operation will fail with + * INVALID_ARGUMENT, and leave the barrier in an undefined state. + * + * @param handle The handle to a barrier. + * @param keys A one-dimensional tensor of keys, with length n. + * @param values An any-dimensional tensor of values, which are associated with the + * respective keys. The 0th dimension must have length n. + * @param componentIndex The component of the barrier elements that is being assigned. + * @return a new instance of BarrierInsertMany + * @see org.tensorflow.op.Ops.barrierInsertMany + */ + public fun barrierInsertMany( + handle: Operand, + keys: Operand, + values: Operand, + componentIndex: Long + ): BarrierInsertMany = java.barrierInsertMany( + handle, + keys, + values, + componentIndex + ) + + /** + * Computes the number of complete elements in the given barrier. + * + * @param handle The handle to a barrier. + * @return a new instance of BarrierReadySize + * @see org.tensorflow.op.Ops.barrierReadySize + */ + public fun barrierReadySize(handle: Operand): BarrierReadySize = + java.barrierReadySize( + handle + ) + + /** + * Takes the given number of completed elements from a barrier. + * This operation concatenates completed-element component tensors along + * the 0th dimension to make a single component tensor. + * + * Elements come out of the barrier when they are complete, and in the order + * in which they were placed into the barrier. 
The indices output provides + * information about the batch in which each element was originally inserted + * into the barrier. + * + * @param handle The handle to a barrier. + * @param numElements A single-element tensor containing the number of elements to + * take. + * @param componentTypes The type of each component in a value. + * @param options carries optional attribute values + * @return a new instance of BarrierTakeMany + * @see org.tensorflow.op.Ops.barrierTakeMany + * @param allowSmallBatch Sets the allowSmallBatch option. + * + * @param allowSmallBatch Allow to return less than num_elements items if barrier is + * already closed. + * @return this Options instance. + * @param waitForIncomplete Sets the waitForIncomplete option. + * + * @param waitForIncomplete the waitForIncomplete option + * @return this Options instance. + * @param timeoutMs Sets the timeoutMs option. + * + * @param timeoutMs If the queue is empty, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + * @return this Options instance. + */ + public fun barrierTakeMany( + handle: Operand, + numElements: Operand, + componentTypes: List>, + allowSmallBatch: Boolean? = null, + waitForIncomplete: Boolean? = null, + timeoutMs: Long? = null + ): BarrierTakeMany = java.barrierTakeMany( + handle, + numElements, + componentTypes, + *listOfNotNull( + allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + ).toTypedArray() + ) + + /** + * Batches all input tensors nondeterministically. + * When many instances of this Op are being run concurrently with the same + * container/shared_name in the same device, some will output zero-shaped Tensors + * and others will output Tensors of size up to max_batch_size. 
+ * + * All Tensors in in_tensors are batched together (so, for example, labels and + * features should be batched with a single instance of this operation. + * + * Each invocation of batch emits an `id` scalar which will be used to identify + * this particular invocation when doing unbatch or its gradient. + * + * Each op which emits a non-empty batch will also emit a non-empty batch_index + * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, + * start, and length of elements of each set of Tensors present in batched_tensors. + * + * Batched tensors are concatenated along the first dimension, and all tensors in + * in_tensors must have the first dimension of the same size. + * + * in_tensors: The tensors to be batched. + * num_batch_threads: Number of scheduling threads for processing batches of work. + * Determines the number of batches processed in parallel. + * max_batch_size: Batch sizes will never be bigger than this. + * batch_timeout_micros: Maximum number of microseconds to wait before outputting + * an incomplete batch. + * allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does + * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + * batches up to one of those sizes. The entries must increase monotonically, and + * the final entry must equal max_batch_size. + * grad_timeout_micros: The timeout to use for the gradient. See Unbatch. + * batched_tensors: Either empty tensors or a batch of concatenated Tensors. + * batch_index: If out_tensors is non-empty, has information to invert it. + * container: Controls the scope of sharing of this batch. + * id: always contains a scalar with a unique ID for this invocation of Batch. + * shared_name: Concurrently running instances of batch in the same device with the + * same container and shared_name will batch their elements together. If left + * empty, the op name will be used as the shared name. + * T: the types of tensors to be batched. 
+ * + * @param inTensors The inTensors value + * @param numBatchThreads The value of the numBatchThreads attribute + * @param maxBatchSize The value of the maxBatchSize attribute + * @param batchTimeoutMicros The value of the batchTimeoutMicros attribute + * @param gradTimeoutMicros The value of the gradTimeoutMicros attribute + * @param options carries optional attribute values + * @return a new instance of Batch + * @see org.tensorflow.op.Ops.batch + * @param maxEnqueuedBatches Sets the maxEnqueuedBatches option. + * + * @param maxEnqueuedBatches the maxEnqueuedBatches option + * @return this Options instance. + * @param allowedBatchSizes Sets the allowedBatchSizes option. + * + * @param allowedBatchSizes the allowedBatchSizes option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + * @param batchingQueue Sets the batchingQueue option. + * + * @param batchingQueue the batchingQueue option + * @return this Options instance. + */ + public fun batch( + inTensors: Iterable>, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + gradTimeoutMicros: Long, + maxEnqueuedBatches: Long? = null, + allowedBatchSizes: List? = null, + container: String? = null, + sharedName: String? = null, + batchingQueue: String? 
= null + ): Batch = java.batch( + inTensors, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + gradTimeoutMicros, + *listOfNotNull( + maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } + ).toTypedArray() + ) + + /** + * Batches all the inputs tensors to the computation done by the function. + * So, for example, in the following code + * ``` + * # This input will be captured. + * y = tf.placeholder_with_default(1.0, shape=[]) + * + * {@literal @ + * ```tf.Defun(tf.float32) + * def computation(a): + * return tf.matmul(a, a) + y + * + * b = gen_batch_ops.batch_function( + * f=computation + * in_tensors=[a], + * captured_tensors=computation.captured_inputs, + * Tout=[o.type for o in computation.definition.signature.output_arg], + * num_batch_threads=1, + * max_batch_size=10, + * batch_timeout_micros=100000, # 100ms + * allowed_batch_sizes=[3, 10], + * batching_queue="") + * } + * + * If more than one session.run call is simultaneously trying to compute `b` + * the values of `a` will be gathered, non-deterministically concatenated + * along the first axis, and only one thread will run the computation. + * + * Assumes that all arguments of the function are Tensors which will be batched + * along their first dimension. + * + * Arguments that are captured, are not batched. The session.run call which does + * the concatenation, will use the values of the captured tensors available to it. + * Therefore, typical uses of captured tensors should involve values which remain + * unchanged across session.run calls. Inference is a good example of this. + * + * SparseTensor is not supported. 
The return value of the decorated function + * must be a Tensor or a list/tuple of Tensors. + * + * @param inTensors The tensors to be batched. + * @param capturedTensors The tensors which are captured in the function, and don't need + * to be batched. + * @param f The value of the f attribute + * @param numBatchThreads Number of scheduling threads for processing batches of work. + * Determines the number of batches processed in parallel. + * @param maxBatchSize Batch sizes will never be bigger than this. + * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting + * an incomplete batch. + * @param Tout the types of the output tensors. + * @param options carries optional attribute values + * @return a new instance of BatchFunction + * @see org.tensorflow.op.Ops.batchFunction + * @param maxEnqueuedBatches Sets the maxEnqueuedBatches option. + * + * @param maxEnqueuedBatches Maximum number of batches enqueued. Default: 10. + * @return this Options instance. + * @param allowedBatchSizes Sets the allowedBatchSizes option. + * + * @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does + * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + * batches up to one of those sizes. The entries must increase monotonically. + * If enable_large_batch_splitting is false (i.e., large-input-split is not + * enabled) the final entry must equal max_batch_size. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container Controls the scope of sharing of this batch. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName Concurrently running instances of batch in the same device with the + * same container and shared_name will batch their elements together. If left + * empty, the op name will be used as the shared name. + * @return this Options instance. 
+ * @param batchingQueue Sets the batchingQueue option. + * + * @param batchingQueue the batchingQueue option + * @return this Options instance. + * @param enableLargeBatchSplitting Sets the enableLargeBatchSplitting option. + * + * @param enableLargeBatchSplitting input with a large size (i.e., larger than the largest value + * of + * `allowed_batch_sizes`) will be splitted into multiple batches with batch size. + * @return this Options instance. + */ + public fun batchFunction( + inTensors: Iterable>, + capturedTensors: Iterable>, + f: ConcreteFunction, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + Tout: List>, + maxEnqueuedBatches: Long? = null, + allowedBatchSizes: List? = null, + container: String? = null, + sharedName: String? = null, + batchingQueue: String? = null, + enableLargeBatchSplitting: Boolean? = null + ): BatchFunction = java.batchFunction( + inTensors, + capturedTensors, + f, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + Tout, + *listOfNotNull( + maxEnqueuedBatches?.let{ org.tensorflow.op.core.BatchFunction.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.BatchFunction.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.BatchFunction.container(it) }, + sharedName?.let{ org.tensorflow.op.core.BatchFunction.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.BatchFunction.batchingQueue(it) }, + enableLargeBatchSplitting?.let{ + org.tensorflow.op.core.BatchFunction.enableLargeBatchSplitting(it) } + ).toTypedArray() + ) + + /** + * BatchToSpace for 4-D tensors of type T. + * This is a legacy version of the more general BatchToSpaceND. + * + * Rearranges (permutes) data from batch into blocks of spatial data, followed by + * cropping. This is the reverse transformation of SpaceToBatch. 
More specifically, + * this op outputs a copy of the input tensor where values from the `batch` + * dimension are moved in spatial blocks to the `height` and `width` dimensions, + * followed by cropping along the `height` and `width` dimensions. + * + * @param data type for `output` output + * @param input 4-D tensor with shape + * `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. + * Note that the batch size of the input tensor must be divisible by + * `block_size * block_size`. + * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + * how many elements to crop from the intermediate result across the spatial + * dimensions as follows: + * ` + * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] + * + * ` + * @param blockSize The value of the blockSize attribute + * @param data type for `BatchToSpace` output and operands + * @return a new instance of BatchToSpace + * @see org.tensorflow.op.Ops.batchToSpace + */ + public fun batchToSpace( + input: Operand, + crops: Operand, + blockSize: Long + ): BatchToSpace = java.batchToSpace( + input, + crops, + blockSize + ) + + /** + * BatchToSpace for N-D tensors of type T. + * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + * `block_shape + [batch]`, interleaves these blocks back into the grid defined by + * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as + * the input. The spatial dimensions of this intermediate result are then + * optionally cropped according to `crops` to produce the output. This is the + * reverse of SpaceToBatch. See below for a precise description. + * + * @param data type for `output` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + + * remaining_shape`, + * where spatial_shape has M dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param crops 2-D with shape `[M, 2]`, all values must be >= 0. 
+ * `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input + * dimension `i + 1`, which corresponds to spatial dimension `i`. It is + * required that + * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. + * + * This operation is equivalent to the following steps: + *
    + *
  1. + * + * Reshape `input` to `reshaped` of shape: + * [block_shape[0], ..., block_shape[M-1], + * batch / prod(block_shape), + * input_shape[1], ..., input_shape[N-1]] + *
  2. + *
  3. + * + * Permute dimensions of `reshaped` to produce `permuted` of shape + * [batch / prod(block_shape), + * + * input_shape[1], block_shape[0], + * ..., + * input_shape[M], block_shape[M-1], + * + * input_shape[M+1], ..., input_shape[N-1]] + *
  4. + *
  5. + * + * Reshape `permuted` to produce `reshaped_permuted` of shape + * [batch / prod(block_shape), + * + * input_shape[1] * block_shape[0], + * ..., + * input_shape[M] * block_shape[M-1], + * + * input_shape[M+1], + * ..., + * input_shape[N-1]] + *
  6. + *
  7. + * + * Crop the start and end of dimensions `[1, ..., M]` of + * `reshaped_permuted` according to `crops` to produce the output of shape: + * [batch / prod(block_shape), + * + * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + * ..., + * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + * + * input_shape[M+1], ..., input_shape[N-1]] + *
  8. + *
+ * + * Some examples: + * + * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * + * ` + * + * The output tensor has shape `[1, 2, 2, 1]` and value: + * ` + * x = [[[[1], [2]], [[3], [4]]]] + * + * ` + * + * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * + * ` + * + * The output tensor has shape `[1, 2, 2, 3]` and value: + * ` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ` + * + * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * + * ` + * + * The output tensor has shape `[1, 4, 4, 1]` and value: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ` + * + * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [2, 0]]`: + * ` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * + * ` + * + * The output tensor has shape `[2, 2, 4, 1]` and value: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ` + * @param data type for `BatchToSpaceND` output and operands + * @return a new instance of BatchToSpaceNd + * @see org.tensorflow.op.Ops.batchToSpaceNd + */ + public fun batchToSpaceNd( + input: Operand, + blockShape: Operand, + crops: Operand + ): BatchToSpaceNd = java.batchToSpaceNd( + input, + blockShape, + crops + ) + + /** + * 
Bitcasts a tensor from one type to another without copying data. + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. + * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + * gives module error. + * For example, + * + * Example 1: + * ``` + * + * a = [1., 2., 3.] + * equality_bitcast = tf.bitcast(a, tf.complex128) + * Traceback (most recent call last): + * ... + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * equality_cast = tf.cast(a, tf.complex128) + * print(equality_cast) + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * ``` + * + * Example 2: + * ``` + * + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * ``` + * + * Example 3: + * ``` + * + * x = [1., 2., 3.] + * y = [0., 2., 3.] + * equality= tf.equal(x,y) + * equality_cast = tf.cast(equality,tf.float32) + * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * print(equality) + * tf.Tensor([False True True], shape=(3,), dtype=bool) + * print(equality_cast) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + * print(equality_bitcast) + * tf.Tensor( + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * ``` + * + * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different + * endian orderings will give different results. 
+ * + * @param data type for `output` output + * @param input The input value + * @param type The value of the type attribute + * @param data type for `Bitcast` output and operands + * @return a new instance of Bitcast + * @see org.tensorflow.op.Ops.bitcast + */ + public fun bitcast(input: Operand, type: Class): Bitcast = + java.bitcast( + input, + type + ) + + /** + * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a + * `true` in the mask. + * + * + * Numpy equivalent is `tensor[mask]`. + * + * + * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match + * the first K dimensions of `tensor`'s shape. We then have: + * `booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]` + * where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major order). + * + * + * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). + * In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match + * the first `axis + dim(mask)` dimensions of `tensor`'s shape. + * + * @param tensor The tensor to mask. + * @param mask The mask to apply. + * @param options carries optional attributes values + * @return The masked tensor. + * @see org.tensorflow.op.Ops.booleanMask + * @param axis + * + * @param axis (Optional) The axis to mask from, or 0 if not set. + */ + public fun booleanMask( + tensor: Operand, + mask: Operand, + axis: Int? = null + ): Operand = java.booleanMask( + tensor, + mask, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.BooleanMask.axis(it) } + ).toTypedArray() + ) + + /** + * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the + * input tensors. `updates` will be broadcasted by default + * + * + * Numpy equivalent is `tensor[mask] = updates`. + * + * + * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match the first K + * dimensions of + * `tensor`'s shape. 
We then have: ``` + * booleanMask(tensor, mask)[i, j1,...,jd] = + * tensor[i1,...,iK,j1,...,jd] + * ``` where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major + * order). + * + * + * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). + * In that + * case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match the first ``` + * axis + + * dim(mask) + * ``` dimensions of `tensor`'s shape. + * + * + * The shape of `updates` should be `[n, t_1, t_2, ...]` where `n` is the number of + * true values in + * `mask` and `t_i` is the `i`th dimension of `tensor` after `axis` and `mask`. + * `updates` will be broadcasted to this shape by default, which can be disabled using + * `options`. + * + * @param tensor The tensor to mask. + * @param mask The mask to apply. + * @param updates the new values + * @param options carries optional attributes values + * @return The masked tensor. + * @see org.tensorflow.op.Ops.booleanMaskUpdate + * @param axis + * + * @param axis (Optional) The axis to mask from, or 0 if not set. + * @param broadcast + * + * @param broadcast (Optional) Whether to try broadcasting update. True by default. + */ + public fun booleanMaskUpdate( + tensor: Operand, + mask: Operand, + updates: Operand, + axis: Int? = null, + broadcast: Boolean? = null + ): Operand = java.booleanMaskUpdate( + tensor, + mask, + updates, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.BooleanMaskUpdate.axis(it) }, + broadcast?.let{ org.tensorflow.op.core.BooleanMaskUpdate.broadcast(it) } + ).toTypedArray() + ) + + /** + * Return the shape of s0 op s1 with broadcast. + * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the + * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. 
+ * + * @param data type for `r0` output + * @param s0 The s0 value + * @param s1 The s1 value + * @param data type for `BroadcastArgs` output and operands + * @return a new instance of BroadcastDynamicShape + * @see org.tensorflow.op.Ops.broadcastDynamicShape + */ + public fun broadcastDynamicShape(s0: Operand, s1: Operand): + BroadcastDynamicShape = java.broadcastDynamicShape( + s0, + s1 + ) + + /** + * Broadcast an array for a compatible shape. + * Broadcasting is the process of making arrays to have compatible shapes + * for arithmetic operations. Two shapes are compatible if for each + * dimension pair they are either equal or one of them is one. When trying + * to broadcast a Tensor to a shape, it starts with the trailing dimensions, + * and works its way forward. + * + * For example, + * ``` + * + * x = tf.constant([1, 2, 3]) + * y = tf.broadcast_to(x, [3, 3]) + * print(y) + * tf.Tensor( + * [[1 2 3] + * [1 2 3] + * [1 2 3]], shape=(3, 3), dtype=int32) + * ``` + * + * In the above example, the input Tensor with the shape of `[1, 3]` + * is broadcasted to output Tensor with shape of `[3, 3]`. + * + * When doing broadcasted operations such as multiplying a tensor + * by a scalar, broadcasting (usually) confers some time or space + * benefit, as the broadcasted tensor is never materialized. + * + * However, `broadcast_to` does not carry with it any such benefits. + * The newly-created tensor takes the full memory of the broadcasted + * shape. (In a graph context, `broadcast_to` might be fused to + * subsequent operation and then be optimized away, however.) + * + * @param data type for `output` output + * @param input A Tensor to broadcast. + * @param shape An 1-D `int` Tensor. The shape of the desired output. 
+ * @param data type for `BroadcastTo` output and operands + * @return a new instance of BroadcastTo + * @see org.tensorflow.op.Ops.broadcastTo + */ + public fun broadcastTo(input: Operand, shape: Operand): + BroadcastTo = java.broadcastTo( + input, + shape + ) + + /** + * Bucketizes 'input' based on 'boundaries'. + * For example, if the inputs are + * boundaries = [0, 10, 100] + * input = [[-5, 10000] + * [150, 10] + * [5, 100]] + * + * then the output will be + * output = [[0, 3] + * [3, 2] + * [1, 3]] + * + * @param input Any shape of Tensor contains with int or float type. + * @param boundaries A sorted list of floats gives the boundary of the buckets. + * @return a new instance of Bucketize + * @see org.tensorflow.op.Ops.bucketize + */ + public fun bucketize(input: Operand, boundaries: List): Bucketize = + java.bucketize( + input, + boundaries + ) + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. + * + * @param argument the argument to the call + * @return the output of the function + * @see ConcreteFunction.call + * @see org.tensorflow.op.Ops.call + */ + public fun call(function: ConcreteFunction, argument: Operand<*>): Operand<*> = java.call( + function, + argument + ) + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the `Signature`. + * + * @param arguments the arguments to the call + * @return the outputs of the function + * @see ConcreteFunction.call + * @see org.tensorflow.op.Ops.call + */ + public fun call(function: ConcreteFunction, arguments: Map>): Map> = java.call( + function, + arguments + ) + + /** + * An n-way switch statement which calls a single branch function. 
+ * ``` + * An n-way switch statement, implementing the following: + * ``` + * switch (branch_index) { + * case 0: + * output = branches[0](input); + * break; + * case 1: + * output = branches[1](input); + * break; + * ... + * case [[nbranches-1]]: + * default: + * output = branches[nbranches-1](input); + * break; + * + * ``` + * ``` + * } + * + * + * Selects between [StatefulCase] and [StatelessCase] based on the statefulness of the function + * arguments. + * + * @param branchIndex The branch selector, an int32 Tensor. + * @param input A list of input tensors passed to the branch function. + * @param Tout A list of output types. + * @param branches ` + * A list of functions each of which takes 'inputs' and returns a list of + * tensors, whose types are the same as what every other branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of Case + * @see org.tensorflow.op.Ops.caseOp + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun caseOp( + branchIndex: Operand, + input: Iterable>, + Tout: List>, + branches: List, + outputShapes: List? = null + ): Case = java.caseOp( + branchIndex, + input, + Tout, + branches, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.Case.outputShapes(it) } + ).toTypedArray() + ) + + /** + * Clips tensor values to a specified min and max. + * Given a tensor `t`, this operation returns a tensor of the same type and + * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + * Any values less than `clip_value_min` are set to `clip_value_min`. Any values + * greater than `clip_value_max` are set to `clip_value_max`. + * + * @param data type for `output` output + * @param t A `Tensor`. + * @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The minimum value to clip by. 
+ * @param clipValueMax A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The maximum value to clip by. + * @param data type for `ClipByValue` output and operands + * @return a new instance of ClipByValue + * @see org.tensorflow.op.Ops.clipByValue + */ + public fun clipByValue( + t: Operand, + clipValueMin: Operand, + clipValueMax: Operand + ): ClipByValue = java.clipByValue( + t, + clipValueMin, + clipValueMax + ) + + /** + * Concatenates tensors along one dimension. + * + * @param data type for `output` output + * @param values List of `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. + * @param axis 0-D. The dimension along which to concatenate. Must be in the + * range [-rank(values), rank(values)). + * @param data type for `ConcatV2` output and operands + * @return a new instance of Concat + * @see org.tensorflow.op.Ops.concat + */ + public fun concat(values: Iterable>, axis: Operand): + Concat = java.concat( + values, + axis + ) + + /** + * Creates a constant containing a single `int` element. + * + * @param data The value to put into the new constant. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Int): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a rank-5 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param data an n-dimensional array of `String` elements. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: NdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-4 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + /** + * Creates a constant containing a single `byte` element. + * + * @param data The value to put into the new constant. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Byte): Constant = java.constant( + data + ) + + /** + * Creates a rank-2 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a rank-6 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-6 constant of `boolean` elements. 
+ * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-4 constant of `boolean` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-3 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a rank-5 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-5 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-1 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: IntArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-2 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a rank-2 constant of `boolean` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a constant containing a single `double` element. + * + * @param data The value to put into the new constant. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Double): Constant = java.constant( + data + ) + + /** + * Creates a constant containing a single `boolean` element. + * + * @param data The value to put into the new constant. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Boolean): Constant = java.constant( + data + ) + + /** + * Creates a constant containing a single `long` element. + * + * @param data The value to put into the new constant. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Long): Constant = java.constant( + data + ) + + /** + * Creates a `String` constant using the default, UTF-8 encoding. + * + * @param data The string to put into the new constant. 
+ * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: String): Constant = java.constant( + data + ) + + /** + * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `boolean` elements. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: BooleanNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-1 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: DoubleArray): Constant = java.constant( + data + ) + + /** + * Creates a constant of `long` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `long` elements. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: LongNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-1 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: FloatArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `boolean` elements. 
+ * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a rank-1 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: ByteArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a constant of `int` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `int` elements. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: IntNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-1 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: LongArray): Constant = java.constant( + data + ) + + /** + * Creates a constant of `float` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `float` elements. 
+ * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: FloatNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-5 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-5 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-5 constant of `boolean` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-6 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + /** + * Creates a constant of `double` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `double` elements. 
+ * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: DoubleNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-6 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant + = java.constant( + data + ) + + /** + * Creates a rank-6 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-2 constant of `int` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a rank-1 constant of `boolean` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: BooleanArray): Constant = java.constant( + data + ) + + /** + * Creates a constant containing a single `float` element. + * + * @param data The value to put into the new constant. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Float): Constant = java.constant( + data + ) + + /** + * Creates a rank-4 constant of `byte` elements. 
+ * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + /** + * Creates a rank-4 constant of `float` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + /** + * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. + * + * @param data an n-dimensional array of `byte` elements. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: ByteNdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-6 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-4 constant of `long` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + /** + * Creates a rank-2 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a rank-2 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + /** + * Creates a rank-4 constant of `double` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + /** + * Creates a rank-1 constant of `long` elements representing the size of each dimensions of + * the given shape. + * + * @param shape a shape + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape): Constant = java.constant( + shape + ) + + /** + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of `String` elements. 
+ * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(charset: Charset, `data`: NdArray): Constant = + java.constant( + charset, + data + ) + + /** + * Creates a constant of `String` elements, using the given charset. + * + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the `String` constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(charset: Charset, `data`: Array): Constant = + java.constant( + charset, + data + ) + + /** + * Creates a `String` constant using a specified encoding. + * + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(charset: Charset, `data`: String): Constant = java.constant( + charset, + data + ) + + /** + * Create a [TBool] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return an boolean constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + shape, + data + ) + + /** + * Create a [TString] constant with data from the given buffer, using the default UTF-8 + * encoding. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. 
+ * @return a string constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant( + shape, + data + ) + + /** + * Create a [TUint8] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a byte constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + shape, + data + ) + + /** + * Create a [TInt32] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return an integer constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + shape, + data + ) + + /** + * Create a [TInt64] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a long constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + shape, + data + ) + + /** + * Create a [TFloat64] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. 
+ * @return a double constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = + java.constant( + shape, + data + ) + + /** + * Create a [TFloat32] constant with data from the given buffer. + * + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a float constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + shape, + data + ) + + /** + * Creates a scalar of `type`, with the value of `number`. `number` may be + * truncated if it does not fit in the target type. + * + * @param type the type of tensor to create. Must be concrete (i.e. not + * [org.tensorflow.types.family.TFloating]) + * @param number the value of the tensor + * @return a constant of the passed type + * @throws IllegalArgumentException if the type is abstract (i.e. + * [org.tensorflow.types.family.TFloating]) or unknown. + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(type: Class, number: Number): Constant = + java.constant( + type, + number + ) + + /** + * Create a [TString] constant with data from the given buffer, using the given encoding. + * + * @param charset charset used to encode/decode string bytes. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a string constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant( + charset: Charset, + shape: Shape, + `data`: DataBuffer + ): Constant = java.constant( + charset, + shape, + data + ) + + /** + * Create a constant with data from the given buffer. 
+ * + * @param the tensor type + * @param type the tensor type class + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @see org.tensorflow.op.Ops.constant + */ + public fun constant( + type: Class, + shape: Shape, + `data`: ByteDataBuffer + ): Constant = java.constant( + type, + shape, + data + ) + + /** + * Create a constant by making an immutable copy of `tensor`. `tensor` may be closed + * afterwards without issue. + * + * + * Note: this endpoint cannot be simply called `constant` since it will conflict with + * other endpoints accepting an NdArray in parameter {e.g. [FloatNdArray)][.tensorOf]}. + * + * @param tensor a Tensor holding the constant value + * @return a constant of the same data type as `tensor` + * @see org.tensorflow.op.Ops.constantOf + */ + public fun constantOf(tensor: T): Constant = java.constantOf( + tensor + ) + + /** + * Creates a scalar of the same type as `toMatch`, with the value of `number`. `number` may be + * truncated if it does not fit in the target type. + * + * @param toMatch the operand providing the target type + * @param number the value of the tensor + * @return a constant with the same type as `toMatch` + * @throws IllegalArgumentException if the type is unknown (which should be impossible). + * @see Ops.constant + * @see org.tensorflow.op.Ops.constantOfSameType + */ + public fun constantOfSameType(toMatch: Operand, number: Number): Constant = + java.constantOfSameType( + toMatch, + number + ) + + /** + * This op consumes a lock created by `MutexLock`. + * This op exists to consume a tensor created by `MutexLock` (other than + * direct control dependencies). It should be the only that consumes the tensor, + * and will raise an error if it is not. Its only purpose is to keep the + * mutex lock tensor alive until it is consumed by this op. 
+ * + * **NOTE**: This operation must run on the same device as its input. This may + * be enforced via the `colocate_with` mechanism. + * + * @param mutexLock A tensor returned by `MutexLock`. + * @return a new instance of ConsumeMutexLock + * @see org.tensorflow.op.Ops.consumeMutexLock + */ + public fun consumeMutexLock(mutexLock: Operand): ConsumeMutexLock = + java.consumeMutexLock( + mutexLock + ) + + /** + * Does nothing. Serves as a control trigger for scheduling. + * Only useful as a placeholder for control edges. + * + * @return a new instance of ControlTrigger + * @see org.tensorflow.op.Ops.controlTrigger + */ + public fun controlTrigger(): ControlTrigger = java.controlTrigger( + + ) + + /** + * Increments 'ref' until it reaches 'limit'. + * + * @param data type for `output` output + * @param ref Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param data type for `CountUpTo` output and operands + * @return a new instance of CountUpTo + * @see org.tensorflow.op.Ops.countUpTo + */ + public fun countUpTo(ref: Operand, limit: Long): CountUpTo = + java.countUpTo( + ref, + limit + ) + + /** + * The op extracts fields from a serialized protocol buffers message into tensors. + * The `decode_proto` op extracts fields from a serialized protocol buffers + * message into tensors. The fields in `field_names` are decoded and converted + * to the corresponding `output_types` if possible. + * + * A `message_type` name must be provided to give context for the field names. + * The actual message descriptor can be looked up either in the linked-in + * descriptor pool or a filename provided by the caller using the + * `descriptor_source` attribute. + * + * Each output tensor is a dense tensor. This means that it is padded to hold + * the largest number of repeated elements seen in the input minibatch. 
(The + * shape is also padded by one to prevent zero-sized dimensions). The actual + * repeat counts for each example in the minibatch can be found in the `sizes` + * output. In many cases the output of `decode_proto` is fed immediately into + * tf.squeeze if missing values are not a concern. When using tf.squeeze, always + * pass the squeeze dimension explicitly to avoid surprises. + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes + * is straightforward. However, there are a few special cases: + *
    + *
  • + * + * A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + * of the API. The resulting string can be used as input to another instance of + * the decode_proto op. + *
  • + *
  • + * + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + * way). Unsigned int32 values can be represented exactly by specifying type + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. + *
  • + *
+ * + * Both binary and text proto serializations are supported, and can be + * chosen using the `format` attribute. + * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: + *
    + *
  • + * + * An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + *
  • + *
  • + * + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. + *
  • + *
  • + * + * A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. + *
  • + *
+ * + * @param bytes Tensor of serialized protos with shape `batch_shape`. + * @param messageType Name of the proto message type to decode. + * @param fieldNames List of strings containing proto field names. An extension field can be + * decoded + * by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. + * @param outputTypes List of TF types to use for the respective field in field_names. + * @param options carries optional attribute values + * @return a new instance of DecodeProto + * @see org.tensorflow.op.Ops.decodeProto + * @param descriptorSource Sets the descriptorSource option. + * + * @param descriptorSource Either the special value `local://` or a path to a file containing + * a serialized `FileDescriptorSet`. + * @return this Options instance. + * @param messageFormat Sets the messageFormat option. + * + * @param messageFormat Either `binary` or `text`. + * @return this Options instance. + * @param sanitize Sets the sanitize option. + * + * @param sanitize Whether to sanitize the result or not. + * @return this Options instance. + */ + public fun decodeProto( + bytes: Operand, + messageType: String, + fieldNames: List, + outputTypes: List>, + descriptorSource: String? = null, + messageFormat: String? = null, + sanitize: Boolean? = null + ): DecodeProto = java.decodeProto( + bytes, + messageType, + fieldNames, + outputTypes, + *listOfNotNull( + descriptorSource?.let{ org.tensorflow.op.core.DecodeProto.descriptorSource(it) }, + messageFormat?.let{ org.tensorflow.op.core.DecodeProto.messageFormat(it) }, + sanitize?.let{ org.tensorflow.op.core.DecodeProto.sanitize(it) } + ).toTypedArray() + ) + + /** + * Makes a copy of `x`. + * + * @param data type for `y` output + * @param x The source tensor of type `T`. 
+ * @param data type for `DeepCopy` output and operands + * @return a new instance of DeepCopy + * @see org.tensorflow.op.Ops.deepCopy + */ + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + x + ) + + /** + * Delete the tensor specified by its handle in the session. + * + * @param handle The handle for a tensor stored in the session state. + * @return a new instance of DeleteSessionTensor + * @see org.tensorflow.op.Ops.deleteSessionTensor + */ + public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = + java.deleteSessionTensor( + handle + ) + + /** + * Deletes the resource specified by the handle. + * All subsequent operations using the resource will result in a NotFound + * error status. + * + * @param resource handle to the resource to delete. + * @param options carries optional attribute values + * @return a new instance of DestroyResourceOp + * @see org.tensorflow.op.Ops.destroyResourceOp + * @param ignoreLookupError Sets the ignoreLookupError option. + * + * @param ignoreLookupError whether to ignore the error when the resource + * doesn't exist. + * @return this Options instance. + */ + public fun destroyResourceOp(resource: Operand, ignoreLookupError: Boolean? = null): + DestroyResourceOp = java.destroyResourceOp( + resource, + *listOfNotNull( + ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ).toTypedArray() + ) + + /** + * Destroys the temporary variable and returns its final value. + * Sets output to the value of the Tensor pointed to by 'ref', then destroys + * the temporary variable called 'var_name'. + * All other uses of 'ref' _must_ have executed before this op. + * This is typically achieved by chaining the ref through each assign op, or by + * using control dependencies. + * + * Outputs the final value of the tensor pointed to by 'ref'. + * + * @param data type for `value` output + * @param ref A reference to the temporary variable tensor. 
+ * @param varName Name of the temporary variable, usually the name of the matching + * 'TemporaryVariable' op. + * @param data type for `DestroyTemporaryVariable` output and operands + * @return a new instance of DestroyTemporaryVariable + * @see org.tensorflow.op.Ops.destroyTemporaryVariable + */ + public fun destroyTemporaryVariable(ref: Operand, varName: String): + DestroyTemporaryVariable = java.destroyTemporaryVariable( + ref, + varName + ) + + /** + * Partitions `data` into `num_partitions` tensors using indices from `partitions`. + * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] + * = i` + * are placed in `outputs[i]` in lexicographic order of `js`, and the first + * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. + * In detail, + * ``` + * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] + * + * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) + * + * ``` + * + * `data.shape` must start with `partitions.shape`. + * + * For example: + * ``` + * # Scalar partitions. + * partitions = 1 + * num_partitions = 2 + * data = [10, 20] + * outputs[0] = [] # Empty with shape [0, 2] + * outputs[1] = [[10, 20]] + * + * # Vector partitions. + * partitions = [0, 0, 1, 1, 0] + * num_partitions = 2 + * data = [10, 20, 30, 40, 50] + * outputs[0] = [10, 20, 50] + * outputs[1] = [30, 40] + * + * ``` + * + * See `dynamic_stitch` for an example on how to merge partitions back. + *
+ * + *
+ * + * @param data type for `outputs` output + * @param data The data value + * @param partitions Any shape. Indices in the range `[0, num_partitions)`. + * @param numPartitions The number of partitions to output. + * @param data type for `DynamicPartition` output and operands + * @return a new instance of DynamicPartition + * @see org.tensorflow.op.Ops.dynamicPartition + */ + public fun dynamicPartition( + `data`: Operand, + partitions: Operand, + numPartitions: Long + ): DynamicPartition = java.dynamicPartition( + data, + partitions, + numPartitions + ) + + /** + * Interleave the values from the `data` tensors into a single tensor. + * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * + * ``` + * + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] + * + * # Vector indices: + * merged[indices[m][i], ...] = data[m][i, ...] + * + * ``` + * + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + * ``` + * merged.shape = [max(indices)] + constant + * + * ``` + * + * Values are merged in order, so if an index appears in both `indices[m][i]` and + * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the + * merged result. If you do not need this guarantee, ParallelDynamicStitch might + * perform better on some devices. 
+ * + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + * ``` + * # Apply function (increments x_i) on elements for which a certain condition + * # apply (x_i != -1 in this example). + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * condition_mask=tf.not_equal(x,tf.constant(-1.)) + * partitioned_data = tf.dynamic_partition( + * x, tf.cast(condition_mask, tf.int32) , 2) + * partitioned_data[1] = partitioned_data[1] + 1.0 + * condition_indices = tf.dynamic_partition( + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * x = tf.dynamic_stitch(condition_indices, partitioned_data) + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # unchanged. + * + * ``` + *
+ * + *
+ * + * @param data type for `merged` output + * @param indices The indices value + * @param data The data value + * @param data type for `DynamicStitch` output and operands + * @return a new instance of DynamicStitch + * @see org.tensorflow.op.Ops.dynamicStitch + */ + public fun dynamicStitch(indices: Iterable>, + `data`: Iterable>): DynamicStitch = java.dynamicStitch( + indices, + data + ) + + /** + * Computes the (possibly normalized) Levenshtein Edit Distance. + * The inputs are variable-length sequences provided by SparseTensors + * (hypothesis_indices, hypothesis_values, hypothesis_shape) + * and + * (truth_indices, truth_values, truth_shape). + * + * The inputs are: + * + * @param hypothesisIndices The indices of the hypothesis list SparseTensor. + * This is an N x R int64 matrix. + * @param hypothesisValues The values of the hypothesis list SparseTensor. + * This is an N-length vector. + * @param hypothesisShape The shape of the hypothesis list SparseTensor. + * This is an R-length vector. + * @param truthIndices The indices of the truth list SparseTensor. + * This is an M x R int64 matrix. + * @param truthValues The values of the truth list SparseTensor. + * This is an M-length vector. + * @param truthShape truth indices, vector. + * @param options carries optional attribute values + * @param data type for `EditDistance` output and operands + * @return a new instance of EditDistance + * @see org.tensorflow.op.Ops.editDistance + * @param normalize Sets the normalize option. + * + * @param normalize boolean (if true, edit distances are normalized by length of truth). + * + * The output is: + * @return this Options instance. + */ + public fun editDistance( + hypothesisIndices: Operand, + hypothesisValues: Operand, + hypothesisShape: Operand, + truthIndices: Operand, + truthValues: Operand, + truthShape: Operand, + normalize: Boolean? 
= null + ): EditDistance = java.editDistance( + hypothesisIndices, + hypothesisValues, + hypothesisShape, + truthIndices, + truthValues, + truthShape, + *listOfNotNull( + normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } + ).toTypedArray() + ) + + /** + * Creates a tensor with the given shape. + * + * This operation creates a tensor of `shape` and `dtype`. + * + * @param data type for `output` output + * @param shape 1-D. Represents the shape of the output tensor. + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `Empty` output and operands + * @return a new instance of Empty + * @see org.tensorflow.op.Ops.empty + * @param init Sets the init option. + * + * @param init If True, initialize the returned tensor with the default value of dtype. + * Otherwise, the implementation is free not to initializethe tensor's content. + * @return this Options instance. + */ + public fun empty( + shape: Operand, + dtype: Class, + `init`: Boolean? = null + ): Empty = java.empty( + shape, + dtype, + *listOfNotNull( + init?.let{ org.tensorflow.op.core.Empty.init(it) } + ).toTypedArray() + ) + + /** + * Creates and returns an empty tensor list. + * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. + * + * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. 
+ * + * @param elementShape The elementShape value + * @param maxNumElements The maxNumElements value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `EmptyTensorList` output and operands + * @return a new instance of EmptyTensorList + * @see org.tensorflow.op.Ops.emptyTensorList + */ + public fun emptyTensorList( + elementShape: Operand, + maxNumElements: Operand, + elementDtype: Class + ): EmptyTensorList = java.emptyTensorList( + elementShape, + maxNumElements, + elementDtype + ) + + /** + * Creates and returns an empty tensor map. + * handle: an empty tensor map + * + * @return a new instance of EmptyTensorMap + * @see org.tensorflow.op.Ops.emptyTensorMap + */ + public fun emptyTensorMap(): EmptyTensorMap = java.emptyTensorMap( + + ) + + /** + * The op serializes protobuf messages provided in the input tensors. + * The types of the tensors in `values` must match the schema for the fields + * specified in `field_names`. All the tensors in `values` must have a common + * shape prefix, _batch_shape_. + * + * The `sizes` tensor specifies repeat counts for each field. The repeat count + * (last dimension) of a each tensor in `values` must be greater than or equal + * to corresponding repeat count in `sizes`. + * + * A `message_type` name must be provided to give context for the field names. + * The actual message descriptor can be looked up either in the linked-in + * descriptor pool or a filename provided by the caller using the + * `descriptor_source` attribute. + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes + * is straightforward. However, there are a few special cases: + *
    + *
  • + * + * A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + * of the API. The resulting string can be used as input to another instance of + * the decode_proto op. + *
  • + *
  • + * + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + * way). Unsigned int32 values can be represented exactly by specifying type + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. + *
  • + *
+ * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: + *
    + *
  • + * + * An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + *
  • + *
  • + * + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. + *
  • + *
  • + * + * A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. + *
  • + *
+ * + * @param sizes Tensor of int32 with shape `[batch_shape, len(field_names)]`. + * @param values List of tensors containing values for the corresponding field. + * @param fieldNames List of strings containing proto field names. + * @param messageType Name of the proto message type to decode. + * @param options carries optional attribute values + * @return a new instance of EncodeProto + * @see org.tensorflow.op.Ops.encodeProto + * @param descriptorSource Sets the descriptorSource option. + * + * @param descriptorSource the descriptorSource option + * @return this Options instance. + */ + public fun encodeProto( + sizes: Operand, + values: Iterable>, + fieldNames: List, + messageType: String, + descriptorSource: String? = null + ): EncodeProto = java.encodeProto( + sizes, + values, + fieldNames, + messageType, + *listOfNotNull( + descriptorSource?.let{ org.tensorflow.op.core.EncodeProto.descriptorSource(it) } + ).toTypedArray() + ) + + /** + * Ensures that the tensor's shape matches the expected shape. + * Raises an error if the input tensor's shape does not match the specified shape. + * Returns the input tensor otherwise. + * + * @param data type for `output` output + * @param input A tensor, whose shape is to be validated. + * @param shape The expected (possibly partially specified) shape of the input tensor. + * @param data type for `EnsureShape` output and operands + * @return a new instance of EnsureShape + * @see org.tensorflow.op.Ops.ensureShape + */ + public fun ensureShape(input: Operand, shape: Shape): EnsureShape = + java.ensureShape( + input, + shape + ) + + /** + * Inserts a dimension of 1 into a tensor's shape. + * Given a tensor `input`, this operation inserts a dimension of 1 at the + * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + * zero; if you specify a negative number for `axis` it is counted backward from + * the end. 
+ * + * This operation is useful if you want to add a batch dimension to a single + * element. For example, if you have a single image of shape `[height, width, + * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + * which will make the shape `[1, height, width, channels]`. + * + * Other examples: + * ``` + * # 't' is a tensor of shape [2] + * shape(expand_dims(t, 0)) ==> [1, 2] + * shape(expand_dims(t, 1)) ==> [2, 1] + * shape(expand_dims(t, -1)) ==> [2, 1] + * + * # 't2' is a tensor of shape [2, 3, 5] + * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + * + * ``` + * + * This operation requires that: + * + * `-1-input.dims() <= dim <= input.dims()` + * + * This operation is related to `squeeze()`, which removes dimensions of + * size 1. + * + * @param data type for `output` output + * @param input The input value + * @param axis 0-D (scalar). Specifies the dimension index at which to + * expand the shape of `input`. Must be in the range + * `[-rank(input) - 1, rank(input)]`. + * @param data type for `ExpandDims` output and operands + * @return a new instance of ExpandDims + * @see org.tensorflow.op.Ops.expandDims + */ + public fun expandDims(input: Operand, axis: Operand): ExpandDims + = java.expandDims( + input, + axis + ) + + /** + * Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension + * of `extract_image_patches`. + * + * @param data type for `patches` output + * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `input`. + * @param strides 1-D of length 5. How far the centers of two consecutive patches are in + * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * @param padding The type of padding algorithm to use. 
+ * + * The size-related attributes are specified as follows: + * ` + * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + * strides = [1, stride_planes, strides_rows, strides_cols, 1] + * + * ` + * @param data type for `ExtractVolumePatches` output and operands + * @return a new instance of ExtractVolumePatches + * @see org.tensorflow.op.Ops.extractVolumePatches + */ + public fun extractVolumePatches( + input: Operand, + ksizes: List, + strides: List, + padding: String + ): ExtractVolumePatches = java.extractVolumePatches( + input, + ksizes, + strides, + padding + ) + + /** + * Creates a tensor filled with a scalar value. + * This operation creates a tensor of shape `dims` and fills it with `value`. + * + * For example: + * ``` + * # Output tensor has shape [2, 3]. + * fill([2, 3], 9) ==> [[9, 9, 9] + * [9, 9, 9]] + * + * ``` + * + * `tf.fill` differs from `tf.constant` in a few ways: + *
    + *
  • `tf.fill` only supports scalar contents, whereas `tf.constant` supports + * Tensor values.
  • + *
  • `tf.fill` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to `tf.constant` which embeds + * the entire Tensor into the graph with a `Const` node.
  • + *
  • Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike `tf.constant`.
  • + *
+ * + * @param data type for `output` output + * @param dims 1-D. Represents the shape of the output tensor. + * @param value 0-D (scalar). Value to fill the returned tensor. + * + * `@`compatibility(numpy) + * + * Equivalent to np.full + * + * `@`end_compatibility + * @param data type for `Fill` output and operands + * @return a new instance of Fill + * @see org.tensorflow.op.Ops.fill + */ + public fun fill(dims: Operand, value: Operand): Fill = + java.fill( + dims, + value + ) + + /** + * Generates fingerprint values. + * Generates fingerprint values of `data`. + * + * Fingerprint op considers the first dimension of `data` as the batch dimension, + * and `output[i]` contains the fingerprint value generated from contents in + * `data[i, ...]` for all `i`. + * + * Fingerprint op writes fingerprint values as byte arrays. For example, the + * default method `farmhash64` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an `uint8` array of size 8, in little-endian + * order. + * + * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + * and that the fingerprint method is `farmhash64`. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 + * integers + * in `data[1, :, :]`. + * + * Note that this op fingerprints the raw underlying buffer, and it does not + * fingerprint Tensor's metadata such as data type and/or shape. 
For example, the + * fingerprint values are invariant under reshapes and bitcasts as long as the + * batch dimension remain the same: + * ``` + * Fingerprint(data) == Fingerprint(Reshape(data, ...)) + * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) + * + * ``` + * + * For string data, one should expect `Fingerprint(data) != Fingerprint(ReduceJoin(data))` in + * general. + * + * @param data Must have rank 1 or higher. + * @param method Fingerprint method used by this op. Currently available method is + * `farmhash::fingerprint64`. + * @return a new instance of Fingerprint + * @see org.tensorflow.op.Ops.fingerprint + */ + public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = + java.fingerprint( + data, + method + ) + + /** + * ``` + * output = input; + * for i in range(start, limit, delta) + * output = body(i, output); + * + * ``` + * + * @param start The lower bound. An int32 + * @param limit The upper bound. An int32 + * @param delta The increment. An int32 + * @param input A list of input tensors whose types are T. + * @param body ` + * A function that takes a list of tensors (int32, T) and returns another + * list of tensors (T). + * + * ` + * @return a new instance of For + * @see org.tensorflow.op.Ops.forOp + */ + public fun forOp( + start: Operand, + limit: Operand, + delta: Operand, + input: Iterable>, + body: ConcreteFunction + ): For = java.forOp( + start, + limit, + delta, + input, + body + ) + + /** + * Gather slices from `params` axis `axis` according to `indices`. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `params.shape[:axis] + + * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: + * ``` + * # Scalar indices (output is rank(params) - 1). + * output[a_0, ..., a_n, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices, b_0, ..., b_n] + * + * # Vector indices (output is rank(params)). 
+ * output[a_0, ..., a_n, i, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + * + * # Higher rank indices (output is rank(params) + rank(indices) - 1). + * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + * + * ``` + *
+ * + *
+ * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + * + * See also `tf.batch_gather` and `tf.gather_nd`. + * + * @param data type for `output` output + * @param params The tensor from which to gather values. Must be at least rank + * `axis + 1`. + * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. + * @param axis The axis in `params` to gather `indices` from. Defaults to the first + * dimension. Supports negative indexes. + * @param options carries optional attribute values + * @param data type for `GatherV2` output and operands + * @return a new instance of Gather + * @see org.tensorflow.op.Ops.gather + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + */ + public fun gather( + params: Operand, + indices: Operand, + axis: Operand, + batchDims: Long? = null + ): Gather = java.gather( + params, + indices, + axis, + *listOfNotNull( + batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } + ).toTypedArray() + ) + + /** + * Gather slices from `params` into a Tensor with shape specified by `indices`. + * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into `params`, where each element defines a + * slice of `params`: + * `output[\`\(i_0, ..., i_{K-2`\\)`] = params[indices[\`\(i_0, ..., i_{K-2}\\)`]] + * } + * + * Whereas in `tf.gather` `indices` defines slices into the `axis` + * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + * first `N` dimensions of `params`, where `N = indices.shape[-1]`. 
+ * + * The last dimension of `indices` can be at most the rank of + * `params`: + * ``` + * indices.shape[-1] <= params.rank + * + * ``` + * + * The last dimension of `indices` corresponds to elements + * (if `indices.shape[-1] == params.rank`) or slices + * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + * of `params`. The output tensor has shape + * ``` + * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + * + * Some examples below. + * + * Simple indexing into a matrix: + * ``` + * indices = [[0, 0], [1, 1]] + * params = [['a', 'b'], ['c', 'd']] + * output = ['a', 'd'] + * + * ``` + * + * Slice indexing into a matrix: + * ``` + * indices = [[1], [0]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['c', 'd'], ['a', 'b']] + * + * ``` + * + * Indexing into a 3-tensor: + * ``` + * indices = [[1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['a1', 'b1'], ['c1', 'd1']]] + * + * + * indices = [[0, 1], [1, 0]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['c0', 'd0'], ['a1', 'b1']] + * + * + * indices = [[0, 0, 1], [1, 0, 1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = ['b0', 'b1'] + * + * ``` + * + * Batched indexing into a matrix: + * ``` + * indices = [[[0, 0]], [[0, 1]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['a'], ['b']] + * + * ``` + * + * Batched slice indexing into a matrix: + * ``` + * indices = [[[1]], [[0]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [[['c', 'd']], [['a', 'b']]] + * + * ``` + * + * Batched indexing into a 3-tensor: + * ``` + * indices = [[[1]], [[0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = 
[[[['a1', 'b1'], ['c1', 'd1']]], + * [[['a0', 'b0'], ['c0', 'd0']]]] + * + * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['c0', 'd0'], ['a1', 'b1']], + * [['a0', 'b0'], ['c1', 'd1']]] + * + * + * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['b0', 'b1'], ['d0', 'c1']] + * + * ``` + * + * See also `tf.gather` and `tf.batch_gather`. + * + * @param data type for `output` output + * @param params The tensor from which to gather values. + * @param indices Index tensor. + * @param data type for `GatherNd` output and operands + * @return a new instance of GatherNd + * @see org.tensorflow.op.Ops.gatherNd + */ + public fun gatherNd(params: Operand, indices: Operand): GatherNd + = java.gatherNd( + params, + indices + ) + + /** + * Store the input tensor in the state of the current session. + * + * @param value The tensor to be stored. + * @return a new instance of GetSessionHandle + * @see org.tensorflow.op.Ops.getSessionHandle + */ + public fun getSessionHandle(value: Operand): GetSessionHandle = + java.getSessionHandle( + value + ) + + /** + * Get the value of the tensor specified by its handle. + * + * @param data type for `value` output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @param data type for `GetSessionTensor` output and operands + * @return a new instance of GetSessionTensor + * @see org.tensorflow.op.Ops.getSessionTensor + */ + public fun getSessionTensor(handle: Operand, dtype: Class): + GetSessionTensor = java.getSessionTensor( + handle, + dtype + ) + + /** + * Adds gradients computation ops to the graph according to scope. 
+ * + * @param y outputs of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of `Gradients` + * @throws IllegalArgumentException if execution environment is not a graph + * @see org.tensorflow.op.Ops.gradients + * @param dx + * + * @param dx partial derivatives of some loss function `L` w.r.t. `y` + * @return this option builder + */ + public fun gradients( + y: Iterable>, + x: Iterable>, + dx: Iterable>? = null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + /** + * Adds operations to compute the partial derivatives of sum of `y`s w.r.t `x`s, + * i.e., `d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...` + * + * + * If `Options.dx()` values are set, they are as the initial symbolic partial derivatives of + * some loss + * function `L` w.r.t. `y`. `Options.dx()` must have the size of `y`. + * + * + * If `Options.dx()` is not set, the implementation will use dx of `OnesLike` for all + * shapes in `y`. + * + * + * The partial derivatives are returned in output `dy`, with the size of `x`. + * + * + * Example of usage: + * ``` + * {@code + * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b)); + * Constant alpha = tf.constant(1.0f); + * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); + * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); + * + * ```} + * + * @param y output of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of `Gradients` + * @throws IllegalArgumentException if execution environment is not a graph + * @see org.tensorflow.op.Ops.gradients + * @param dx + * + * @param dx partial derivatives of some loss function `L` w.r.t. 
`y` + * @return this option builder + */ + public fun gradients( + y: Operand<*>, + x: Iterable>, + dx: Iterable>? = null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + /** + * Gives a guarantee to the TF runtime that the input tensor is a constant. + * The runtime is then free to make optimizations based on this. + * + * Only accepts value typed tensors as inputs and rejects resource variable handles + * as input. + * + * Returns the input tensor without modification. + * + * @param data type for `output` output + * @param input The input value + * @param data type for `GuaranteeConst` output and operands + * @return a new instance of GuaranteeConst + * @see org.tensorflow.op.Ops.guaranteeConst + */ + public fun guaranteeConst(input: Operand): GuaranteeConst = + java.guaranteeConst( + input + ) + + /** + * Creates a non-initialized hash table. + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands + * @return a new instance of HashTable + * @see org.tensorflow.op.Ops.hashTable + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. 
+ * + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + * @return this Options instance. + */ + public fun hashTable( + keyDtype: Class, + valueDtype: Class, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): HashTable = java.hashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + /** + * Return histogram of values. + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * + * ``` + * + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. 
+ * @param data type for `HistogramFixedWidth` output and operands + * @return a new instance of HistogramFixedWidth, with default output types + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins + ) + + /** + * Return histogram of values. + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * + * ``` + * + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param dtype The value of the dtype attribute + * @param data type for `HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands + * @return a new instance of HistogramFixedWidth + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand, + dtype: Class + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins, + dtype + ) + + /** + * Return a tensor with the same shape and contents as the input tensor or value. 
+ * + * @param data type for `output` output + * @param input The input value + * @param data type for `Identity` output and operands + * @return a new instance of Identity + * @see org.tensorflow.op.Ops.identity + */ + public fun identity(input: Operand): Identity = java.identity( + input + ) + + /** + * Returns a list of tensors with the same shapes and contents as the input + * tensors. + * + * This op can be used to override the gradient for complicated functions. For + * example, suppose y = f(x) and we wish to apply a custom function g for backprop + * such that dx = g(dy). In Python, + * ``` + * with tf.get_default_graph().gradient_override_map( + * {'IdentityN': 'OverrideGradientWithG' + * ```): + * y, _ = identity_n([f(x), x]) + * + * `@`tf.RegisterGradient('OverrideGradientWithG') + * def ApplyG(op, dy, _): + * return [None, g(dy)] # Do not backprop to f(x). + * } + * + * @param input The input value + * @return a new instance of IdentityN + * @see org.tensorflow.op.Ops.identityN + */ + public fun identityN(input: Iterable>): IdentityN = java.identityN( + input + ) + + /** + * output = cond ? then_branch(input) : else_branch(input) + * + * + * Selects between [StatefulIf] and [StatelessIf] based on the statefulness of the function + * arguments. + * + * @param cond ` + * A Tensor. If the tensor is a scalar of non-boolean type, the + * scalar is converted to a boolean according to the + * following rule: if the scalar is a numerical value, non-zero means + * `True` and zero means False; if the scalar is a string, non-empty + * means `True` and empty means `False`. If the tensor is not a scalar, + * being empty means False and being non-empty means True. + * + * ` + * @param input A list of input tensors. + * @param Tout A list of output types. + * @param thenBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what else_branch returns. 
+ * + * ` + * @param elseBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what then_branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of If + * @see org.tensorflow.op.Ops.ifOp + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun ifOp( + cond: Operand, + input: Iterable>, + Tout: List>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + outputShapes: List? = null + ): If = java.ifOp( + cond, + input, + Tout, + thenBranch, + elseBranch, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) } + ).toTypedArray() + ) + + /** + * Returns immutable tensor from memory region. + * The current implementation memmaps the tensor from a file. + * + * @param data type for `tensor` output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. + * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @param data type for `ImmutableConst` output and operands + * @return a new instance of ImmutableConst + * @see org.tensorflow.op.Ops.immutableConst + */ + public fun immutableConst( + dtype: Class, + shape: Shape, + memoryRegionName: String + ): ImmutableConst = java.immutableConst( + dtype, + shape, + memoryRegionName + ) + + /** + * Table initializer that takes two tensors for keys and values respectively. + * + * @param tableHandle Handle to a table which will be initialized. + * @param keys Keys of type Tkey. + * @param values Values of type Tval. 
+ * @return a new instance of InitializeTable + * @see org.tensorflow.op.Ops.initializeTable + */ + public fun initializeTable( + tableHandle: Operand, + keys: Operand, + values: Operand + ): InitializeTable = java.initializeTable( + tableHandle, + keys, + values + ) + + /** + * Initializes a table from a text file. + * It inserts one key-value pair into the table for each line of the file. + * The key and value is extracted from the whole line content, elements from the + * split line based on `delimiter` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by `key_index` and + * `value_index`. + *
    + *
  • A value of -1 means use the line number(starting from zero), expects `int64`.
  • + *
  • A value of -2 means use the whole line content, expects `string`.
  • + *
  • A value >= 0 means use the index (starting at zero) of the split line based + * on `delimiter`.
  • + *
+ * + * @param tableHandle Handle to a table which will be initialized. + * @param filename Filename of a vocabulary text file. + * @param keyIndex Column index in a line to get the table `key` values from. + * @param valueIndex Column index that represents information of a line to get the table + * `value` values from. + * @param options carries optional attribute values + * @return a new instance of InitializeTableFromTextFile + * @see org.tensorflow.op.Ops.initializeTableFromTextFile + * @param vocabSize Sets the vocabSize option. + * + * @param vocabSize Number of elements of the file, use -1 if unknown. + * @return this Options instance. + * @param delimiter Sets the delimiter option. + * + * @param delimiter Delimiter to separate fields in a line. + * @return this Options instance. + * @param offset Sets the offset option. + * + * @param offset the offset option + * @return this Options instance. + */ + public fun initializeTableFromTextFile( + tableHandle: Operand, + filename: Operand, + keyIndex: Long, + valueIndex: Long, + vocabSize: Long? = null, + delimiter: String? = null, + offset: Long? = null + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + tableHandle, + filename, + keyIndex, + valueIndex, + *listOfNotNull( + vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) }, + offset?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.offset(it) } + ).toTypedArray() + ) + + /** + * Adds v into specified rows of x. + * ``` + * Computes y = x; y[i, :] += v; return y. + * + * ``` + * + * @param data type for `y` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * must be the same as i's size. 
+ * @param data type for `InplaceAdd` output and operands + * @return a new instance of InplaceAdd + * @see org.tensorflow.op.Ops.inplaceAdd + */ + public fun inplaceAdd( + x: Operand, + i: Operand, + v: Operand + ): InplaceAdd = java.inplaceAdd( + x, + i, + v + ) + + /** + * ``` + * Subtracts `v` into specified rows of `x`. + * + * Computes y = x; y[i, :] -= v; return y. + * + * ``` + * + * @param data type for `y` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * must be the same as i's size. + * @param data type for `InplaceSub` output and operands + * @return a new instance of InplaceSub + * @see org.tensorflow.op.Ops.inplaceSub + */ + public fun inplaceSub( + x: Operand, + i: Operand, + v: Operand + ): InplaceSub = java.inplaceSub( + x, + i, + v + ) + + /** + * Updates specified rows 'i' with values 'v'. + * Computes `x[i, :] = v; return x`. + * + * Originally this function is mutative however for compilation we make this + * operation create / operate on a copy of `x`. + * + * @param data type for `y` output + * @param x A tensor of type `T`. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * must be the same as i's size. + * @param data type for `InplaceUpdate` output and operands + * @return a new instance of InplaceUpdate + * @see org.tensorflow.op.Ops.inplaceUpdate + */ + public fun inplaceUpdate( + x: Operand, + i: Operand, + v: Operand + ): InplaceUpdate = java.inplaceUpdate( + x, + i, + v + ) + + /** + * Checks whether a tensor has been initialized. + * Outputs boolean scalar indicating whether the tensor has been initialized. + * + * @param ref Should be from a `Variable` node. May be uninitialized. 
+ * @return a new instance of IsVariableInitialized + * @see org.tensorflow.op.Ops.isVariableInitialized + */ + public fun isVariableInitialized(ref: Operand): IsVariableInitialized = + java.isVariableInitialized( + ref + ) + + /** + * Computes the Kth order statistic of a data set. The current + * implementation uses a binary search requiring exactly 32 passes over + * the input data. The running time is linear with respect to input + * size. The median-of-medians algorithm is probably faster, but is + * difficult to implement efficiently in XLA. The implementation imposes + * a total ordering on floats. The ordering is consistent with the usual + * partial order. Positive NaNs are greater than positive + * infinity. Negative NaNs are less than negative infinity. NaNs with + * distinct payloads are treated as distinct. Subnormal numbers are + * preserved (not flushed to zero). Positive infinity is greater than all + * numbers. Negative infinity is less than all numbers. Positive is + * greater than negative zero. There are less than k values greater than + * the kth order statistic. There are at least k values greater than or + * equal to the Kth order statistic. The semantics are not the same as + * top_k_unique. + * + * @param input The input value + * @param k The value of the k attribute + * @return a new instance of KthOrderStatistic + * @see org.tensorflow.op.Ops.kthOrderStatistic + */ + public fun kthOrderStatistic(input: Operand, k: Long): KthOrderStatistic = + java.kthOrderStatistic( + input, + k + ) + + /** + * Outputs all keys and values in the table. + * + * @param data type for `keys` output + * @param data type for `values` output + * @param tableHandle Handle to the table. 
+ * @param Tkeys The value of the Tkeys attribute + * @param Tvalues The value of the Tvalues attribute + * @param data type for `LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands + * @return a new instance of LookupTableExport + * @see org.tensorflow.op.Ops.lookupTableExport + */ + public fun lookupTableExport( + tableHandle: Operand, + Tkeys: Class, + Tvalues: Class + ): LookupTableExport = java.lookupTableExport( + tableHandle, + Tkeys, + Tvalues + ) + + /** + * Looks up keys in a table, outputs the corresponding values. + * The tensor `keys` must of the same type as the keys of the table. + * The output `values` is of the type of the table values. + * + * The scalar `default_value` is the value output for keys not present in the + * table. It must also be of the same type as the table values. + * + * @param data type for `values` output + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param defaultValue The defaultValue value + * @param data type for `LookupTableFindV2` output and operands + * @return a new instance of LookupTableFind + * @see org.tensorflow.op.Ops.lookupTableFind + */ + public fun lookupTableFind( + tableHandle: Operand, + keys: Operand, + defaultValue: Operand + ): LookupTableFind = java.lookupTableFind( + tableHandle, + keys, + defaultValue + ) + + /** + * Replaces the contents of the table with the specified keys and values. + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. + * + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. 
+ * @return a new instance of LookupTableImport + * @see org.tensorflow.op.Ops.lookupTableImport + */ + public fun lookupTableImport( + tableHandle: Operand, + keys: Operand, + values: Operand + ): LookupTableImport = java.lookupTableImport( + tableHandle, + keys, + values + ) + + /** + * Updates the table to associates keys with values. + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. + * + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableInsert + * @see org.tensorflow.op.Ops.lookupTableInsert + */ + public fun lookupTableInsert( + tableHandle: Operand, + keys: Operand, + values: Operand + ): LookupTableInsert = java.lookupTableInsert( + tableHandle, + keys, + values + ) + + /** + * Computes the number of elements in the given table. + * + * @param tableHandle Handle to the table. + * @return a new instance of LookupTableSize + * @see org.tensorflow.op.Ops.lookupTableSize + */ + public fun lookupTableSize(tableHandle: Operand): LookupTableSize = + java.lookupTableSize( + tableHandle + ) + + /** + * Forwards the input to the output. + * This operator represents the loop termination condition used by the + * "pivot" switches of a loop. + * + * @param input A boolean scalar, representing the branch predicate of the Switch op. + * @return a new instance of LoopCond + * @see org.tensorflow.op.Ops.loopCond + */ + public fun loopCond(input: Operand): LoopCond = java.loopCond( + input + ) + + /** + * Make all elements in the non-Batch dimension unique, but "close" to + * their initial value. Never returns a sub-normal number. Never returns + * zero. The sign of each input element is always identical to the sign + * of the corresponding output element. Behavior for infinite elements is + * undefined. Behavior for subnormal elements is undefined. 
+ * + * @param input The input value + * @return a new instance of MakeUnique + * @see org.tensorflow.op.Ops.makeUnique + */ + public fun makeUnique(input: Operand): MakeUnique = java.makeUnique( + input + ) + + /** + * Op removes all elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapClear + * @see org.tensorflow.op.Ops.mapClear + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapClear = java.mapClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op returns the number of incomplete elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapIncompleteSize + * @see org.tensorflow.op.Ops.mapIncompleteSize + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. 
+ * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapIncompleteSize = java.mapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op peeks at the values at the specified key. If the + * underlying container does not contain this key + * this op will block until it does. + * + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapPeek + * @see org.tensorflow.op.Ops.mapPeek + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? 
= null, + container: String? = null, + sharedName: String? = null + ): MapPeek = java.mapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op returns the number of elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapSize + * @see org.tensorflow.op.Ops.mapSize + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapSize = java.mapSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } + ).toTypedArray() + ) + + /** + * Stage (key, values) in the underlying container which behaves like a hashtable. + * + * @param key int64 + * @param indices The indices value + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. 
+ * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapStage + * @see org.tensorflow.op.Ops.mapStage + * @param capacity Sets the capacity option. + * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. + */ + public fun mapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapStage = java.mapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op removes and returns the values associated with the key + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. 
+ * + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapUnstage + * @see org.tensorflow.op.Ops.mapUnstage + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapUnstage = java.mapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op removes and returns a random (key, value) + * from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. + * + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of MapUnstageNoKey + * @see org.tensorflow.op.Ops.mapUnstageNoKey + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. 
+ * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun mapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapUnstageNoKey = java.mapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + /** + * Computes the maximum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Max` output and operands + * @return a new instance of Max + * @see org.tensorflow.op.Ops.max + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun max( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): Max = java.max( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } + ).toTypedArray() + ) + + /** + * Forwards the value of an available tensor from `inputs` to `output`. + * `Merge` waits for at least one of the tensors in `inputs` to become available. + * It is usually combined with `Switch` to implement branching. + * + * `Merge` forwards the first tensor to become available to `output`, and sets + * `value_index` to its index in `inputs`. + * + * @param data type for `output` output + * @param inputs The input tensors, exactly one of which will become available. + * @param data type for `Merge` output and operands + * @return a new instance of Merge + * @see org.tensorflow.op.Ops.merge + */ + public fun merge(inputs: Iterable>): Merge = java.merge( + inputs + ) + + /** + * Computes the minimum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Min` output and operands + * @return a new instance of Min + * @see org.tensorflow.op.Ops.min + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun min( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Min = java.min( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } + ).toTypedArray() + ) + + /** + * Pads a tensor with mirrored values. 
+ * This operation pads a `input` with mirrored values according to the `paddings` + * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many values to add before the contents of `input` in that dimension, and + * `paddings[D, 1]` indicates how many values to add after the contents of `input` + * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no + * greater + * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + * (if false, respectively). + * + * The padded size of each dimension D of the output is: + * + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 2, 3], [4, 5, 6]]. + * # 'paddings' is [[1, 1]], [2, 2]]. + * # 'mode' is SYMMETRIC. + * # rank of 't' is 2. + * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + * [2, 1, 1, 2, 3, 3, 2] + * [5, 4, 4, 5, 6, 6, 5] + * [5, 4, 4, 5, 6, 6, 5]] + * + * ``` + * + * @param data type for `output` output + * @param input The input tensor to be padded. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * do not include the borders, while in symmetric mode the padded regions + * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + * it is `[1, 2, 3, 3, 2]` in symmetric mode. 
+ * @param data type for `MirrorPad` output and operands + * @return a new instance of MirrorPad + * @see org.tensorflow.op.Ops.mirrorPad + */ + public fun mirrorPad( + input: Operand, + paddings: Operand, + mode: String + ): MirrorPad = java.mirrorPad( + input, + paddings, + mode + ) + + /** + * Wraps an arbitrary MLIR computation expressed as a module with a main() function. + * This operation does not have an associated kernel and is not intended to be + * executed in a regular TensorFlow session. Instead it is intended to be used for + * testing or for special case where a user intends to pass custom MLIR computation + * through a TensorFlow graph with the intent of having custom tooling processing + * it downstream (when targeting a different environment, like TensorFlow lite for + * example). + * The MLIR module is expected to have a main() function that will be used as an + * entry point. The inputs to the operations will be passed as argument to the + * main() function and the returned values of the main function mapped to the + * outputs. 
+ * Example usage: + * ``` + * import tensorflow as tf + * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op + * + * mlir_module = '''python + * func {@literal @ + * ```main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { + * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> + * tensor<10x10xf32> + * return %ret : tensor<10x10xf32> + * } + * ''' + * + * `@`tf.function + * def foo(x, y): + * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) + * + * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), + * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() + * } + * + * @param inputs The inputs value + * @param mlirModule The value of the mlirModule attribute + * @param Toutputs The value of the Toutputs attribute + * @return a new instance of MlirPassthroughOp + * @see org.tensorflow.op.Ops.mlirPassthroughOp + */ + public fun mlirPassthroughOp( + inputs: Iterable>, + mlirModule: String, + Toutputs: List> + ): MlirPassthroughOp = java.mlirPassthroughOp( + inputs, + mlirModule, + Toutputs + ) + + /** + * Creates an empty hash table that uses tensors as the backing store. + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey The deletedKey value + * @param valueDtype Type of the table values. 
+ * @param options carries optional attribute values + * @param data type for `MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands + * @return a new instance of MutableDenseHashTable + * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape The shape of each value. + * @return this Options instance. + * @param initialNumBuckets Sets the initialNumBuckets option. + * + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @return this Options instance. + * @param maxLoadFactor Sets the maxLoadFactor option. + * + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + * @return this Options instance. + */ + public fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + valueDtype: Class, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null, + initialNumBuckets: Long? = null, + maxLoadFactor: Float? 
= null + ): MutableDenseHashTable = java.mutableDenseHashTable( + emptyKey, + deletedKey, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) + }, + valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + ).toTypedArray() + ) + + /** + * Creates an empty hash table. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands + * @return a new instance of MutableHashTable + * @see org.tensorflow.op.Ops.mutableHashTable + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + * @return this Options instance. 
+ */ + public fun mutableHashTable( + keyDtype: Class, + valueDtype: Class, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): MutableHashTable = java.mutableHashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + /** + * Creates an empty hash table. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @return a new instance of MutableHashTableOfTensors + * @see org.tensorflow.op.Ops.mutableHashTableOfTensors + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. 
+ */ + public fun mutableHashTableOfTensors( + keyDtype: Class, + valueDtype: Class, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let{ + org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) }, + valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + ).toTypedArray() + ) + + /** + * Creates a Mutex resource that can be locked by `MutexLock`. + * + * @param options carries optional attribute values + * @return a new instance of Mutex + * @see org.tensorflow.op.Ops.mutex + * + * @param container Sets the container option. + * + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } + ).toTypedArray() + ) + + /** + * Locks a mutex resource. The output is the lock. So long as the lock tensor + * is alive, any other request to use `MutexLock` with this mutex will wait. 
+ * + * This is particularly useful for creating a critical section when used in + * conjunction with `MutexLockIdentity`: + * ``` + * mutex = mutex_v2( + * shared_name=handle_name, container=container, name=name) + * + * def execute_in_critical_section(fn, *args, **kwargs): + * lock = gen_resource_variable_ops.mutex_lock(mutex) + * + * with ops.control_dependencies([lock]): + * r = fn(*args, **kwargs) + * + * with ops.control_dependencies(nest.flatten(r)): + * with ops.colocate_with(mutex): + * ensure_lock_exists = mutex_lock_identity(lock) + * + * # Make sure that if any element of r is accessed, all of + * # them are executed together. + * r = nest.map_structure(tf.identity, r) + * + * with ops.control_dependencies([ensure_lock_exists]): + * return nest.map_structure(tf.identity, r) + * + * ``` + * + * While `fn` is running in the critical section, no other functions which wish to + * use this critical section may run. + * + * Often the use case is that two executions of the same graph, in parallel, + * wish to run `fn`; and we wish to ensure that only one of them executes + * at a time. This is especially important if `fn` modifies one or more + * variables at a time. + * + * It is also useful if two separate functions must share a resource, but we + * wish to ensure the usage is exclusive. + * + * @param mutex The mutex resource to lock. + * @return a new instance of MutexLock + * @see org.tensorflow.op.Ops.mutexLock + */ + public fun mutexLock(mutex: Operand): MutexLock = java.mutexLock( + mutex + ) + + /** + * Makes its input available to the next iteration. + * + * @param data type for `output` output + * @param data The tensor to be made available to the next iteration. + * @param data type for `NextIteration` output and operands + * @return a new instance of NextIteration + * @see org.tensorflow.op.Ops.nextIteration + */ + public fun nextIteration(`data`: Operand): NextIteration = + java.nextIteration( + data + ) + + /** + * Does nothing. 
Only useful as a placeholder for control edges. + * + * @return a new instance of NoOp + * @see org.tensorflow.op.Ops.noOp + */ + public fun noOp(): NoOp = java.noOp( + + ) + + /** + * Returns a one-hot tensor. + * The locations represented by indices in `indices` take value `on_value`, + * while all other locations take value `off_value`. + * + * If the input `indices` is rank `N`, the output will have rank `N+1`, + * The new axis is created at dimension `axis` (default: the new axis is + * appended at the end). + * + * If `indices` is a scalar the output shape will be a vector of length `depth`. + * + * If `indices` is a vector of length `features`, the output shape will be: + * ``` + * features x depth if axis == -1 + * depth x features if axis == 0 + * + * ``` + * + * If `indices` is a matrix (batch) with shape `[batch, features]`, + * the output shape will be: + * ``` + * batch x features x depth if axis == -1 + * batch x depth x features if axis == 1 + * depth x batch x features if axis == 0 + * + * ``` + * **Examples** + * + * + * Suppose that + * ``` + * indices = [0, 2, -1, 1] + * depth = 3 + * on_value = 5.0 + * off_value = 0.0 + * axis = -1 + * + * ``` + * + * Then output is `[4 x 3]`: + * ``` + * output = + * [5.0 0.0 0.0] // one_hot(0) + * [0.0 0.0 5.0] // one_hot(2) + * [0.0 0.0 0.0] // one_hot(-1) + * [0.0 5.0 0.0] // one_hot(1) + * + * ``` + * + * Suppose that + * ``` + * indices = [0, 2, -1, 1] + * depth = 3 + * on_value = 0.0 + * off_value = 3.0 + * axis = 0 + * + * ``` + * + * Then output is `[3 x 4]`: + * ``` + * output = + * [0.0 3.0 3.0 3.0] + * [3.0 3.0 3.0 0.0] + * [3.0 3.0 3.0 3.0] + * [3.0 0.0 3.0 3.0] + * // ^ one_hot(0) + * // ^ one_hot(2) + * // ^ one_hot(-1) + * // ^ one_hot(1) + * + * ``` + * + * Suppose that + * ``` + * indices = [[0, 2], [1, -1]] + * depth = 3 + * on_value = 1.0 + * off_value = 0.0 + * axis = -1 + * + * ``` + * + * Then output is `[2 x 2 x 3]`: + * ``` + * output = + * [ + * [1.0, 0.0, 0.0] // one_hot(0) + * [0.0, 
0.0, 1.0] // one_hot(2) + * ][ + * [0.0, 1.0, 0.0] // one_hot(1) + * [0.0, 0.0, 0.0] // one_hot(-1) + * ] + * + * ``` + * + * @param data type for `output` output + * @param indices A tensor of indices. + * @param depth A scalar defining the depth of the one hot dimension. + * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param options carries optional attribute values + * @param data type for `OneHot` output and operands + * @return a new instance of OneHot + * @see org.tensorflow.op.Ops.oneHot + * @param axis Sets the axis option. + * + * @param axis The axis to fill (default: -1, a new inner-most axis). + * @return this Options instance. + */ + public fun oneHot( + indices: Operand, + depth: Operand, + onValue: Operand, + offValue: Operand, + axis: Long? = null + ): OneHot = java.oneHot( + indices, + depth, + onValue, + offValue, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } + ).toTypedArray() + ) + + /** + * Creates a one valued tensor given its type and shape. + * + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor type class. Can not be TString. + * @return a constant tensor initialized with ones + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. + * @see org.tensorflow.op.Ops.ones + */ + public fun ones(dims: Operand, type: Class): Ones = + java.ones( + dims, + type + ) + + /** + * Returns a tensor of ones with the same shape and type as x. + * + * @param data type for `y` output + * @param x a tensor of type T. + * @param data type for `OnesLike` output and operands + * @return a new instance of OnesLike + * @see org.tensorflow.op.Ops.onesLike + */ + public fun onesLike(x: Operand): OnesLike = java.onesLike( + x + ) + + /** + * Op removes all elements in the underlying container. 
+ * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapClear + * @see org.tensorflow.op.Ops.orderedMapClear + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapClear = java.orderedMapClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op returns the number of incomplete elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapIncompleteSize + * @see org.tensorflow.op.Ops.orderedMapIncompleteSize + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. 
+ * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op peeks at the values at the specified key. If the + * underlying container does not contain this key + * this op will block until it does. This Op is optimized for + * performance. + * + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapPeek + * @see org.tensorflow.op.Ops.orderedMapPeek + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapPeek = java.orderedMapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op returns the number of elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapSize + * @see org.tensorflow.op.Ops.orderedMapSize + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapSize = java.orderedMapSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + ).toTypedArray() + ) + + /** + * Stage (key, values) in the underlying container which behaves like a ordered + * associative container. Elements are ordered by key. 
+ * + * @param key int64 + * @param indices The indices value + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapStage + * @see org.tensorflow.op.Ops.orderedMapStage + * @param capacity Sets the capacity option. + * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. + */ + public fun orderedMapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapStage = java.orderedMapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op removes and returns the values associated with the key + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. 
+ * + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapUnstage + * @see org.tensorflow.op.Ops.orderedMapUnstage + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapUnstage = java.orderedMapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op removes and returns the (key, value) element with the smallest + * key from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. + * + * @param indices The indices value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of OrderedMapUnstageNoKey + * @see org.tensorflow.op.Ops.orderedMapUnstageNoKey + * @param capacity Sets the capacity option. 
+ * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun orderedMapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + /** + * Pads a tensor. + * This operation pads `input` according to the `paddings` and `constant_values` + * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many padding values to add before the contents of `input` in that dimension, + * and `paddings[D, 1]` indicates how many padding values to add after the contents + * of `input` in that dimension. `constant_values` is a scalar tensor of the same + * type as `input` that indicates the value to use for padding `input`. 
+ * + * The padded size of each dimension D of the output is: + * + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 1], [2, 2]] + * # 'paddings' is [[1, 1], [2, 2]] + * # 'constant_values' is 0 + * # rank of 't' is 2 + * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + * [0, 0, 1, 1, 0, 0] + * [0, 0, 2, 2, 0, 0] + * [0, 0, 0, 0, 0, 0]] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param paddings The paddings value + * @param constantValues The constantValues value + * @param data type for `PadV2` output and operands + * @return a new instance of Pad + * @see org.tensorflow.op.Ops.pad + */ + public fun pad( + input: Operand, + paddings: Operand, + constantValues: Operand + ): Pad = java.pad( + input, + paddings, + constantValues + ) + + /** + * Concatenates a list of `N` tensors along the first dimension. + * The input tensors are all required to have size 1 in the first dimension. + * + * For example: + * ``` + * # 'x' is [[1, 4]] + * # 'y' is [[2, 5]] + * # 'z' is [[3, 6]] + * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * + * ``` + * + * The difference between concat and parallel_concat is that concat requires all + * of the inputs be computed before the operation will begin but doesn't require + * that the input shapes be known during graph construction. Parallel concat + * will copy pieces of the input into the output as they become available, in + * some situations this can provide a performance benefit. + * + * @param data type for `output` output + * @param values Tensors to be concatenated. All must have size 1 in the first dimension + * and same shape. + * @param shape the final shape of the result; should be equal to the shapes of any input + * but with the number of input values in the first dimension. 
+ * @param data type for `ParallelConcat` output and operands + * @return a new instance of ParallelConcat + * @see org.tensorflow.op.Ops.parallelConcat + */ + public fun parallelConcat(values: Iterable>, shape: Shape): + ParallelConcat = java.parallelConcat( + values, + shape + ) + + /** + * Interleave the values from the `data` tensors into a single tensor. + * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * + * ``` + * + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] + * + * # Vector indices: + * merged[indices[m][i], ...] = data[m][i, ...] + * + * ``` + * + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + * ``` + * merged.shape = [max(indices)] + constant + * + * ``` + * + * Values may be merged in parallel, so if an index appears in both `indices[m][i]` + * and `indices[n][j]`, the result may be invalid. This differs from the normal + * DynamicStitch operator that defines the behavior in that case. + * + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + * ``` + * # Apply function (increments x_i) on elements for which a certain condition + * # apply (x_i != -1 in this example). 
+ * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * condition_mask=tf.not_equal(x,tf.constant(-1.)) + * partitioned_data = tf.dynamic_partition( + * x, tf.cast(condition_mask, tf.int32) , 2) + * partitioned_data[1] = partitioned_data[1] + 1.0 + * condition_indices = tf.dynamic_partition( + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * x = tf.dynamic_stitch(condition_indices, partitioned_data) + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # unchanged. + * + * ``` + *
+ * + *
+ * + * @param data type for `merged` output + * @param indices The indices value + * @param data The data value + * @param data type for `ParallelDynamicStitch` output and operands + * @return a new instance of ParallelDynamicStitch + * @see org.tensorflow.op.Ops.parallelDynamicStitch + */ + public fun parallelDynamicStitch(indices: Iterable>, + `data`: Iterable>): ParallelDynamicStitch = + java.parallelDynamicStitch( + indices, + data + ) + + /** + * returns `f(inputs)`, where `f`'s body is placed and partitioned. + * + * + * Selects between [StatefulPartitionedCall] and [StatelessPartitionedCall] based on the + * statefulness of the function arguments. + * + * @param args A list of input tensors. + * @param Tout A list of output types. + * @param f ` + * A function that takes 'args', a list of tensors, and returns 'output', + * another list of tensors. Input and output types are specified by 'Tin' + * and 'Tout'. The function body of f will be placed and partitioned across + * devices, setting this op apart from the regular Call op. This op is + * stateful. + * + * ` + * @param options carries optional attribute values + * @return a new instance of PartitionedCall + * @see org.tensorflow.op.Ops.partitionedCall + * @param config Sets the config option. + * + * @param config the config option + * @return this Options instance. + * @param configProto Sets the configProto option. + * + * @param configProto the configProto option + * @return this Options instance. + * @param executorType Sets the executorType option. + * + * @param executorType the executorType option + * @return this Options instance. + */ + public fun partitionedCall( + args: Iterable>, + Tout: List>, + f: ConcreteFunction, + config: String? = null, + configProto: String? = null, + executorType: String? 
= null + ): PartitionedCall = java.partitionedCall( + args, + Tout, + f, + *listOfNotNull( + config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) }, + configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) }, + executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) } + ).toTypedArray() + ) + + /** + * A placeholder op for a value that will be fed into the computation. + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. + * + * @param data type for `output` output + * @param dtype The type of elements in the tensor. + * @param options carries optional attribute values + * @param data type for `Placeholder` output and operands + * @return a new instance of Placeholder + * @see org.tensorflow.op.Ops.placeholder + * @param shape Sets the shape option. + * + * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the + * shape is unconstrained. + * @return this Options instance. + */ + public fun placeholder(dtype: Class, shape: Shape? = null): Placeholder = + java.placeholder( + dtype, + *listOfNotNull( + shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() + ) + + /** + * A placeholder op that passes through `input` when its output is not fed. + * + * @param data type for `output` output + * @param input The default value to produce when `output` is not fed. + * @param shape The (possibly partial) shape of the tensor. + * @param data type for `PlaceholderWithDefault` output and operands + * @return a new instance of PlaceholderWithDefault + * @see org.tensorflow.op.Ops.placeholderWithDefault + */ + public fun placeholderWithDefault(input: Operand, shape: Shape): + PlaceholderWithDefault = java.placeholderWithDefault( + input, + shape + ) + + /** + * Prints a string scalar. 
+ * Prints a string scalar to the desired output_stream. + * + * @param input The string scalar to print. + * @param options carries optional attribute values + * @return a new instance of Print + * @see org.tensorflow.op.Ops.print + * @param outputStream Sets the outputStream option. + * + * @param outputStream A string specifying the output stream or logging level to print to. + * @return this Options instance. + * @param end Sets the end option. + * + * @param end the end option + * @return this Options instance. + */ + public fun print( + input: Operand, + outputStream: String? = null, + end: String? = null + ): Print = java.print( + input, + *listOfNotNull( + outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, + end?.let{ org.tensorflow.op.core.Print.end(it) } + ).toTypedArray() + ) + + /** + * Computes the product of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Prod` output and operands + * @return a new instance of Prod + * @see org.tensorflow.op.Ops.prod + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun prod( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Prod = java.prod( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } + ).toTypedArray() + ) + + /** + * Reshapes a quantized tensor as per the Reshape op. 
+ * + * @param data type for `output` output + * @param tensor The tensor value + * @param shape Defines the shape of the output tensor. + * @param inputMin The minimum value of the input. + * @param inputMax The maximum value of the input. + * @param data type for `QuantizedReshape` output and operands + * @return a new instance of QuantizedReshape + * @see org.tensorflow.op.Ops.quantizedReshape + */ + public fun quantizedReshape( + tensor: Operand, + shape: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizedReshape = java.quantizedReshape( + tensor, + shape, + inputMin, + inputMax + ) + + /** + * Creates a sequence of numbers. + * This operation creates a sequence of numbers that begins at `start` and + * extends by increments of `delta` up to but not including `limit`. + * + * For example: + * ``` + * # 'start' is 3 + * # 'limit' is 18 + * # 'delta' is 3 + * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + * + * ``` + * + * @param data type for `output` output + * @param start 0-D (scalar). First entry in the sequence. + * @param limit 0-D (scalar). Upper limit of sequence, exclusive. + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @param data type for `Range` output and operands + * @return a new instance of Range + * @see org.tensorflow.op.Ops.range + */ + public fun range( + start: Operand, + limit: Operand, + delta: Operand + ): Range = java.range( + start, + limit, + delta + ) + + /** + * Returns the rank of a tensor. + * This operation returns an integer representing the rank of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * # shape of tensor 't' is [2, 2, 3] + * rank(t) ==> 3 + * + * ``` + * + * **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank + * of a tensor is the number of indices required to uniquely select each element + * of the tensor. Rank is also known as "order", "degree", or + * "ndims." 
+ * + * @param input The input value + * @return a new instance of Rank + * @see org.tensorflow.op.Ops.rank + */ + public fun rank(input: Operand): Rank = java.rank( + input + ) + + /** + * Reads the value of a variable. + * The tensor returned by this operation is immutable. + * + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. + * + * @param data type for `value` output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. + * @param data type for `ReadVariableOp` output and operands + * @return a new instance of ReadVariableOp + * @see org.tensorflow.op.Ops.readVariableOp + */ + public fun readVariableOp(resource: Operand, dtype: Class): + ReadVariableOp = java.readVariableOp( + resource, + dtype + ) + + /** + * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @return a new instance of ReduceAll + * @see org.tensorflow.op.Ops.reduceAll + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun reduceAll( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): ReduceAll = java.reduceAll( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the "logical or" of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @return a new instance of ReduceAny + * @see org.tensorflow.op.Ops.reduceAny + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun reduceAny( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceAny = java.reduceAny( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the maximum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Max` output and operands + * @return a new instance of ReduceMax + * @see org.tensorflow.op.Ops.reduceMax + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. 
+ */ + public fun reduceMax( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMax = java.reduceMax( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the minimum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Min` output and operands + * @return a new instance of ReduceMin + * @see org.tensorflow.op.Ops.reduceMin + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun reduceMin( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMin = java.reduceMin( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the product of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. 
+ * @param options carries optional attribute values + * @param data type for `Prod` output and operands + * @return a new instance of ReduceProd + * @see org.tensorflow.op.Ops.reduceProd + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun reduceProd( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceProd = java.reduceProd( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the sum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Sum` output and operands + * @return a new instance of ReduceSum + * @see org.tensorflow.op.Ops.reduceSum + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun reduceSum( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceSum = java.reduceSum( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } + ).toTypedArray() + ) + + /** + * Makes its input available to the next iteration. + * + * @param data type for `output` output + * @param data The tensor to be made available to the next iteration. 
+ * @param data type for `RefNextIteration` output and operands + * @return a new instance of RefNextIteration + * @see org.tensorflow.op.Ops.refNextIteration + */ + public fun refNextIteration(`data`: Operand): RefNextIteration = + java.refNextIteration( + data + ) + + /** + * Forwards the `index`th element of `inputs` to `output`. + * + * @param data type for `output` output + * @param index A scalar that determines the input that gets selected. + * @param inputs A list of ref tensors, one of which will be forwarded to `output`. + * @param data type for `RefSelect` output and operands + * @return a new instance of RefSelect + * @see org.tensorflow.op.Ops.refSelect + */ + public fun refSelect(index: Operand, inputs: Iterable>): + RefSelect = java.refSelect( + index, + inputs + ) + + /** + * Forwards the ref tensor `data` to the output port determined by `pred`. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. + * + * See also `Switch` and `Merge`. + * + * @param data type for `output_false` output + * @param data The ref tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @param data type for `RefSwitch` output and operands + * @return a new instance of RefSwitch + * @see org.tensorflow.op.Ops.refSwitch + */ + public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = + java.refSwitch( + data, + pred + ) + + /** + * Runs function `f` on a remote device indicated by `target`. + * + * @param target A fully specified device name where we want to run the function. + * @param args A list of arguments for the function. + * @param Tout The type list for the return values. + * @param f The function to run remotely. 
+ * @return a new instance of RemoteCall + * @see org.tensorflow.op.Ops.remoteCall + */ + public fun remoteCall( + target: Operand, + args: Iterable>, + Tout: List>, + f: ConcreteFunction + ): RemoteCall = java.remoteCall( + target, + args, + Tout, + f + ) + + /** + * Reshapes a tensor. + * Given `tensor`, this operation returns a tensor that has the same values + * as `tensor` with shape `shape`. + * + * If one component of 1-D tensor `shape` is the special value -1, the size of that + * dimension is computed so that the total size remains constant. In particular, a + * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + * unknown. + * + * The `shape` must be 1-D and the operation returns a tensor with shape + * `shape` filled with the values of `tensor`. In this case, the number of elements + * implied by `shape` must be the same as the number of elements in `tensor`. + * + * It is an error if `shape` is not 1-D. + * + * For example: + * ``` + * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + * # tensor 't' has shape [9] + * reshape(t, [3, 3]) ==> [[1, 2, 3], + * [4, 5, 6], + * [7, 8, 9]] + * + * # tensor 't' is [[[1, 1], [2, 2]], + * # [[3, 3], [4, 4]]] + * # tensor 't' has shape [2, 2, 2] + * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + * [3, 3, 4, 4]] + * + * # tensor 't' is [[[1, 1, 1], + * # [2, 2, 2]], + * # [[3, 3, 3], + * # [4, 4, 4]], + * # [[5, 5, 5], + * # [6, 6, 6]]] + * # tensor 't' has shape [3, 2, 3] + * # pass '[-1]' to flatten 't' + * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + * + * # -1 can also be used to infer the shape + * + * # -1 is inferred to be 9: + * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * # -1 is inferred to be 2: + * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * # -1 is inferred to be 3: + * reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + * [2, 2, 2], + * [3, 3, 3]], + * [[4, 4, 4], + * [5, 
5, 5], + * [6, 6, 6]]] + * + * # tensor 't' is [7] + * # shape `[]` reshapes to a scalar + * reshape(t, []) ==> 7 + * + * ``` + * + * @param data type for `output` output + * @param tensor The tensor value + * @param shape Defines the shape of the output tensor. + * @param data type for `Reshape` output and operands + * @return a new instance of Reshape + * @see org.tensorflow.op.Ops.reshape + */ + public fun reshape(tensor: Operand, shape: Operand): Reshape = + java.reshape( + tensor, + shape + ) + + /** + * Increments variable pointed to by 'resource' until it reaches 'limit'. + * + * @param data type for `output` output + * @param resource Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T The value of the T attribute + * @param data type for `ResourceCountUpTo` output and operands + * @return a new instance of ResourceCountUpTo + * @see org.tensorflow.op.Ops.resourceCountUpTo + */ + public fun resourceCountUpTo( + resource: Operand, + limit: Long, + T_: Class + ): ResourceCountUpTo = java.resourceCountUpTo( + resource, + limit, + T_ + ) + + /** + * Gather slices from the variable pointed to by `resource` according to `indices`. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] + * + * # Vector indices + * output[i, :, ..., :] = params[indices[i], :, ... :] + * + * # Higher rank indices + * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] + * + * ``` + * + * @param data type for `output` output + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `ResourceGather` output and operands + * @return a new instance of ResourceGather + * @see org.tensorflow.op.Ops.resourceGather + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. + */ + public fun resourceGather( + resource: Operand, + indices: Operand, + dtype: Class, + batchDims: Long? = null, + validateIndices: Boolean? = null + ): ResourceGather = java.resourceGather( + resource, + indices, + dtype, + *listOfNotNull( + batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } + ).toTypedArray() + ) + + /** + * The ResourceGatherNd operation + * + * @param data type for `output` output + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute + * @param data type for `ResourceGatherNd` output and operands + * @return a new instance of ResourceGatherNd + * @see org.tensorflow.op.Ops.resourceGatherNd + */ + public fun resourceGatherNd( + resource: Operand, + indices: Operand, + dtype: Class + ): ResourceGatherNd = java.resourceGatherNd( + resource, + indices, + dtype + ) + + /** + * Adds sparse updates to the variable referenced by `resource`. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] += updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] 
+ * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterAdd + * @see org.tensorflow.op.Ops.resourceScatterAdd + */ + public fun resourceScatterAdd( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterAdd = java.resourceScatterAdd( + resource, + indices, + updates + ) + + /** + * Divides sparse updates into the variable referenced by `resource`. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] /= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterDiv + * @see org.tensorflow.op.Ops.resourceScatterDiv + */ + public fun resourceScatterDiv( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterDiv = java.resourceScatterDiv( + resource, + indices, + updates + ) + + /** + * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMax + * @see org.tensorflow.op.Ops.resourceScatterMax + */ + public fun resourceScatterMax( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterMax = java.resourceScatterMax( + resource, + indices, + updates + ) + + /** + * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMin + * @see org.tensorflow.op.Ops.resourceScatterMin + */ + public fun resourceScatterMin( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterMin = java.resourceScatterMin( + resource, + indices, + updates + ) + + /** + * Multiplies sparse updates into the variable referenced by `resource`. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] *= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMul + * @see org.tensorflow.op.Ops.resourceScatterMul + */ + public fun resourceScatterMul( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterMul = java.resourceScatterMul( + resource, + indices, + updates + ) + + /** + * Applies sparse addition to individual values or slices in a Variable. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * add = tf.scatter_nd_add(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(add) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. 
+ * @param options carries optional attribute values + * @return a new instance of ResourceScatterNdAdd + * @see org.tensorflow.op.Ops.resourceScatterNdAdd + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceScatterNdAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + /** + * The ResourceScatterNdMax operation + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values whose element wise max is taken with ref + * @param options carries optional attribute values + * @return a new instance of ResourceScatterNdMax + * @see org.tensorflow.op.Ops.resourceScatterNdMax + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceScatterNdMax( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdMax = java.resourceScatterNdMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + ).toTypedArray() + ) + + /** + * The ResourceScatterNdMin operation + * + * @param ref A resource handle. Must be from a VarHandleOp. 
+ * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values whose element wise min is taken with ref. + * @param options carries optional attribute values + * @return a new instance of ResourceScatterNdMin + * @see org.tensorflow.op.Ops.resourceScatterNdMin + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceScatterNdMin( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdMin = java.resourceScatterNdMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + ).toTypedArray() + ) + + /** + * Applies sparse subtraction to individual values or slices in a Variable. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. 
In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * sub = tf.scatter_nd_sub(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(sub) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, -9, 3, -6, -4, 6, 7, -4] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. + * @param options carries optional attribute values + * @return a new instance of ResourceScatterNdSub + * @see org.tensorflow.op.Ops.resourceScatterNdSub + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceScatterNdSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdSub = java.resourceScatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + /** + * Applies sparse `updates` to individual values or slices within a given + * variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. 
+ * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]]. + * } + * + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) + * update = tf.scatter_nd_update(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(update) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 11, 3, 10, 9, 6, 7, 12] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. + * @param options carries optional attribute values + * @return a new instance of ResourceScatterNdUpdate + * @see org.tensorflow.op.Ops.resourceScatterNdUpdate + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceScatterNdUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + /** + * Subtracts sparse updates from the variable referenced by `resource`. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] -= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterSub + * @see org.tensorflow.op.Ops.resourceScatterSub + */ + public fun resourceScatterSub( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterSub = java.resourceScatterSub( + resource, + indices, + updates + ) + + /** + * Assigns sparse updates to the variable referenced by `resource`. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * + * ``` + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterUpdate + * @see org.tensorflow.op.Ops.resourceScatterUpdate + */ + public fun resourceScatterUpdate( + resource: Operand, + indices: Operand, + updates: Operand + ): ResourceScatterUpdate = java.resourceScatterUpdate( + resource, + indices, + updates + ) + + /** + * Assign `value` to the sliced l-value reference of `ref`. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. 
+ * + * @param ref The ref value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value + * @param options carries optional attribute values + * @param data type for `ResourceStridedSliceAssign` output and operands + * @return a new instance of ResourceStridedSliceAssign + * @see org.tensorflow.op.Ops.resourceStridedSliceAssign + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. + */ + public fun resourceStridedSliceAssign( + ref: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + /** + * Reverses specific dimensions of a tensor. + * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * dimensions of `tensor` to reverse. This operation reverses each dimension + * `i` for which there exists `j` s.t. `axis[j] == i`. + * + * `tensor` can have up to 8 dimensions. The number of dimensions specified + * in `axis` may be 0 or more entries. If an index is specified more than + * once, a InvalidArgument error is raised. + * + * For example: + * ``` + * # tensor 't' is [[[[ 0, 1, 2, 3], + * # [ 4, 5, 6, 7], + * # [ 8, 9, 10, 11]], + * # [[12, 13, 14, 15], + * # [16, 17, 18, 19], + * # [20, 21, 22, 23]]]] + * # tensor 't' shape is [1, 2, 3, 4] + * + * # 'dims' is [3] or 'dims' is [-1] + * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + * [ 7, 6, 5, 4], + * [ 11, 10, 9, 8]], + * [[15, 14, 13, 12], + * [19, 18, 17, 16], + * [23, 22, 21, 20]]]] + * + * # 'dims' is '[1]' (or 'dims' is '[-3]') + * reverse(t, dims) ==> [[[[12, 13, 14, 15], + * [16, 17, 18, 19], + * [20, 21, 22, 23] + * [[ 0, 1, 2, 3], + * [ 4, 5, 6, 7], + * [ 8, 9, 10, 11]]]] + * + * # 'dims' is '[2]' (or 'dims' is '[-2]') + * reverse(t, dims) ==> [[[[8, 9, 10, 11], + * [4, 5, 6, 7], + * [0, 1, 2, 3]] + * [[20, 21, 22, 23], + * [16, 17, 18, 19], + * [12, 13, 14, 15]]]] + * + * ``` + * + * @param data type for `output` output + * @param tensor Up to 8-D. + * @param axis 1-D. 
The indices of the dimensions to reverse. Must be in the range + * `[-rank(tensor), rank(tensor))`. + * @param data type for `ReverseV2` output and operands + * @return a new instance of Reverse + * @see org.tensorflow.op.Ops.reverse + */ + public fun reverse(tensor: Operand, axis: Operand): Reverse = + java.reverse( + tensor, + axis + ) + + /** + * Reverses variable length slices. + * This op first slices `input` along the dimension `batch_dim`, and for each + * slice `i`, reverses the first `seq_lengths[i]` elements along + * the dimension `seq_dim`. + * + * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + * + * The output slice `i` along dimension `batch_dim` is then given by input + * slice `i`, with the first `seq_lengths[i]` slices along dimension + * `seq_dim` reversed. + * + * For example: + * ``` + * # Given this: + * batch_dim = 0 + * seq_dim = 1 + * input.dims = (4, 8, ...) + * seq_lengths = [7, 2, 3, 5] + * + * # then slices of input are reversed on seq_dim, but only up to seq_lengths: + * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + * + * # while entries past seq_lens are copied through: + * output[0, 7:, :, ...] = input[0, 7:, :, ...] + * output[1, 2:, :, ...] = input[1, 2:, :, ...] + * output[2, 3:, :, ...] = input[2, 3:, :, ...] + * output[3, 2:, :, ...] = input[3, 2:, :, ...] + * + * ``` + * + * In contrast, if: + * ``` + * # Given this: + * batch_dim = 2 + * seq_dim = 0 + * input.dims = (8, ?, 4, ...) + * seq_lengths = [7, 2, 3, 5] + * + * # then slices of input are reversed on seq_dim, but only up to seq_lengths: + * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + * output[0:3, :, 2, :, ...] 
= input[3:0:-1, :, 2, :, ...] + * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + * + * # while entries past seq_lens are copied through: + * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + * + * ``` + * + * @param data type for `output` output + * @param input The input to reverse. + * @param seqLengths 1-D with length `input.dims(batch_dim)` and + * `max(seq_lengths) <= input.dims(seq_dim)` + * @param seqDim The dimension which is partially reversed. + * @param options carries optional attribute values + * @param data type for `ReverseSequence` output and operands + * @return a new instance of ReverseSequence + * @see org.tensorflow.op.Ops.reverseSequence + * @param batchDim Sets the batchDim option. + * + * @param batchDim The dimension along which reversal is performed. + * @return this Options instance. + */ + public fun reverseSequence( + input: Operand, + seqLengths: Operand, + seqDim: Long, + batchDim: Long? = null + ): ReverseSequence = java.reverseSequence( + input, + seqLengths, + seqDim, + *listOfNotNull( + batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } + ).toTypedArray() + ) + + /** + * Rolls the elements of a tensor along an axis. + * The elements are shifted positively (towards larger indices) by the offset of + * `shift` along the dimension of `axis`. Negative `shift` values will shift + * elements in the opposite direction. Elements that roll passed the last position + * will wrap around to the first and vice versa. Multiple shifts along multiple + * axes may be specified. 
+ * + * For example: + * ``` + * # 't' is [0, 1, 2, 3, 4] + * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] + * + * # shifting along multiple dimensions + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] + * + * # shifting along the same axis multiple times + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which + * elements are shifted positively (towards larger indices) along the dimension + * specified by `axis[i]`. Negative shifts will roll the elements in the opposite + * direction. + * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift + * `shift[i]` should occur. If the same axis is referenced more than once, the + * total shift for that axis will be the sum of all the shifts that belong to that + * axis. + * @param data type for `Roll` output and operands + * @return a new instance of Roll + * @see org.tensorflow.op.Ops.roll + */ + public fun roll( + input: Operand, + shift: Operand, + axis: Operand + ): Roll = java.roll( + input, + shift, + axis + ) + + /** + * Adds sparse updates to a variable reference. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] += updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. 
+ * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @param options carries optional attribute values + * @param data type for `ScatterAdd` output and operands + * @return a new instance of ScatterAdd + * @see org.tensorflow.op.Ops.scatterAdd + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the addition will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterAdd = java.scatterAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } + ).toTypedArray() + ) + + /** + * Divides a variable reference by sparse updates. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] /= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions divide. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of values that `ref` is divided by. 
+ * @param options carries optional attribute values + * @param data type for `ScatterDiv` output and operands + * @return a new instance of ScatterDiv + * @see org.tensorflow.op.Ops.scatterDiv + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the operation will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterDiv( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterDiv = java.scatterDiv( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } + ).toTypedArray() + ) + + /** + * Reduces sparse updates into a variable reference using the `max` operation. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attribute values + * @param data type for `ScatterMax` output and operands + * @return a new instance of ScatterMax + * @see org.tensorflow.op.Ops.scatterMax + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the update will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterMax( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMax = java.scatterMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } + ).toTypedArray() + ) + + /** + * Reduces sparse updates into a variable reference using the `min` operation. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attribute values + * @param data type for `ScatterMin` output and operands + * @return a new instance of ScatterMin + * @see org.tensorflow.op.Ops.scatterMin + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the update will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterMin( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMin = java.scatterMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } + ).toTypedArray() + ) + + /** + * Multiplies sparse updates into a variable reference. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] *= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to multiply to `ref`. 
+ * @param options carries optional attribute values + * @param data type for `ScatterMul` output and operands + * @return a new instance of ScatterMul + * @see org.tensorflow.op.Ops.scatterMul + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the operation will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterMul( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMul = java.scatterMul( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } + ).toTypedArray() + ) + + /** + * Scatters `updates` into a tensor of shape `shape` according to `indices`. + * Update the input tensor by scattering sparse `updates` according to individual values at the + * specified `indices`. + * This op returns an `output` tensor with the `shape` you specify. This op is the + * inverse of the `tf.gather_nd` operator which extracts values or slices from a + * given tensor. + * + * This operation is similar to `tf.tensor_scatter_add`, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` + * is identical to calling + * `tf.tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`. + * + * If `indices` contains duplicates, the duplicate `values` are accumulated + * (summed). + * + * **WARNING**: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates; + * numbers summed in different order may yield different results because of some + * numerical approximation issues. + * + * `indices` is an integer tensor of shape `shape`. 
The last dimension + * of `indices` can be at most the rank of `shape`: + * ``` + * indices.shape[-1] <= shape.rank + * + * ``` + * + * The last dimension of `indices` corresponds to indices of elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. + * + * `updates` is a tensor with shape: + * ``` + * indices.shape[:-1] + shape[indices.shape[-1]:] + * + * ``` + * + * The simplest form of the scatter op is to insert individual elements in + * a tensor by index. Consider an example where you want to insert 4 scattered + * elements in a rank-1 tensor with 8 elements. + *
+ * + *
+ * + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * shape = tf.constant([8]) + * scatter = tf.scatter_nd(indices, updates, shape) + * print(scatter) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [0, 11, 0, 10, 9, 0, 0, 12] + * + * ``` + * + * You can also insert entire slices of a higher rank tensor all at once. For + * example, you can insert two slices in the first dimension of a rank-3 tensor + * with two matrices of new values. + *
+ * + *
+ * + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * shape = tf.constant([4, 4, 4]) + * scatter = tf.scatter_nd(indices, updates, shape) + * print(scatter) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param data type for `output` output + * @param indices Tensor of indices. + * @param updates Values to scatter into the output tensor. + * @param shape 1-D. The shape of the output tensor. + * @param data type for `ScatterNd` output and operands + * @param data type for `ScatterNd` output and operands + * @return a new instance of ScatterNd + * @see org.tensorflow.op.Ops.scatterNd + */ + public fun scatterNd( + indices: Operand, + updates: Operand, + shape: Operand + ): ScatterNd = java.scatterNd( + indices, + updates, + shape + ) + + /** + * Applies sparse addition to individual values or slices in a Variable. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. 
+ * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * add = tf.scatter_nd_add(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(add) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param data type for `output_ref` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. + * @param options carries optional attribute values + * @param data type for `ScatterNdAdd` output and operands + * @return a new instance of ScatterNdAdd + * @see org.tensorflow.op.Ops.scatterNdAdd + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterNdAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdAdd = java.scatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + /** + * Applies sparse addition to `input` using individual values or slices + * from `updates` according to indices `indices`. 
The updates are non-aliasing: + * `input` is only modified in-place if no other operations will use it. + * Otherwise, a copy of `input` is made. This operation has a gradient with + * respect to both `input` and `updates`. + * + * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `input`. + * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or `(P-K)`-dimensional slices + * (if `K < P`) along the `K`th dimension of `input`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * + * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + * elements. In Python, that addition would look like this: + * ``` + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + * with tf.Session() as sess: + * print(sess.run(output)) + * + * ``` + * + * The resulting value `output` would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to slices. + * + * @param data type for `output` output + * @param input A Tensor. + * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. + * A tensor of indices into `input`. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to `input`. 
+ * @param data type for `ScatterNdNonAliasingAdd` output and operands + * @return a new instance of ScatterNdNonAliasingAdd + * @see org.tensorflow.op.Ops.scatterNdNonAliasingAdd + */ + public fun scatterNdNonAliasingAdd( + input: Operand, + indices: Operand, + updates: Operand + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + input, + indices, + updates + ) + + /** + * Applies sparse subtraction to individual values or slices in a Variable. + * within a given variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * sub = tf.scatter_nd_sub(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(sub) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, -9, 3, -6, -4, 6, 7, -4] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param data type for `output_ref` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. 
A tensor of updated values + * to subtract from ref. + * @param options carries optional attribute values + * @param data type for `ScatterNdSub` output and operands + * @return a new instance of ScatterNdSub + * @see org.tensorflow.op.Ops.scatterNdSub + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterNdSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdSub = java.scatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + /** + * Applies sparse `updates` to individual values or slices within a given + * variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * + * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ + * + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. 
In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) + * update = tf.scatter_nd_update(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(update) + * + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 11, 3, 10, 9, 6, 7, 12] + * + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * See also `tf.scatter_update` and `tf.batch_scatter_update`. + * + * @param data type for `output_ref` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. + * @param options carries optional attribute values + * @param data type for `ScatterNdUpdate` output and operands + * @return a new instance of ScatterNdUpdate + * @see org.tensorflow.op.Ops.scatterNdUpdate + * @param useLocking Sets the useLocking option. + * + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterNdUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdUpdate = java.scatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + /** + * Subtracts sparse updates to a variable reference. + * ``` + * # Scalar indices + * ref[indices, ...] -= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] 
+ * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * + * ``` + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their (negated) contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to subtract from `ref`. + * @param options carries optional attribute values + * @param data type for `ScatterSub` output and operands + * @return a new instance of ScatterSub + * @see org.tensorflow.op.Ops.scatterSub + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterSub = java.scatterSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } + ).toTypedArray() + ) + + /** + * Applies sparse updates to a variable reference. + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * If values in `ref` is to be updated more than once, because there are + * duplicate entries in `indices`, the order at which the updates happen + * for each value is undefined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *
+ * + *
+ * + * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to store in `ref`. + * @param options carries optional attribute values + * @param data type for `ScatterUpdate` output and operands + * @return a new instance of ScatterUpdate + * @see org.tensorflow.op.Ops.scatterUpdate + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the assignment will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun scatterUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterUpdate = java.scatterUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + ).toTypedArray() + ) + + /** + * The SelectV2 operation + * + * @param data type for `output` output + * @param condition The condition value + * @param t The t value + * @param e The e value + * @param data type for `SelectV2` output and operands + * @return a new instance of Select + * @see org.tensorflow.op.Ops.select + */ + public fun select( + condition: Operand, + t: Operand, + e: Operand + ): Select = java.select( + condition, + t, + e + ) + + /** + * Computes the difference between two lists of numbers or strings. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. 
In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * + * ``` + * + * @param data type for `out` output + * @param data type for `idx` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param data type for `ListDiff` output and operands + * @return a new instance of SetDiff1d, with default output types + * @see org.tensorflow.op.Ops.setDiff1d + */ + public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = + java.setDiff1d( + x, + y + ) + + /** + * Computes the difference between two lists of numbers or strings. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * + * ``` + * + * @param data type for `out` output + * @param data type for `idx` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. 
+ * @param outIdx The value of the outIdx attribute + * @param data type for `ListDiff` output and operands + * @param data type for `ListDiff` output and operands + * @return a new instance of SetDiff1d + * @see org.tensorflow.op.Ops.setDiff1d + */ + public fun setDiff1d( + x: Operand, + y: Operand, + outIdx: Class + ): SetDiff1d = java.setDiff1d( + x, + y, + outIdx + ) + + /** + * Number of unique elements along last dimension of input `set`. + * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + * and `set_shape`. The last dimension contains values in a set, duplicates are + * allowed but ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set` + * indices. + * + * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. + * @param setValues 1D `Tensor`, values of a `SparseTensor`. + * @param setShape 1D `Tensor`, shape of a `SparseTensor`. + * @param options carries optional attribute values + * @return a new instance of SetSize + * @see org.tensorflow.op.Ops.setSize + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. + */ + public fun setSize( + setIndices: Operand, + setValues: Operand, + setShape: Operand, + validateIndices: Boolean? = null + ): SetSize = java.setSize( + setIndices, + setValues, + setShape, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } + ).toTypedArray() + ) + + /** + * Returns the shape of a tensor. + * This operation returns a 1-D integer tensor representing the shape of `input`. 
+ * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of Shape, with default output types + * @see org.tensorflow.op.Ops.shape + */ + public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( + input + ) + + /** + * Returns the shape of a tensor. + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `Shape` output and operands + * @return a new instance of Shape + * @see org.tensorflow.op.Ops.shape + */ + public fun shape(input: Operand, outType: Class): + org.tensorflow.op.core.Shape = java.shape( + input, + outType + ) + + /** + * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of ShapeN, with default output types + * @see org.tensorflow.op.Ops.shapeN + */ + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + input + ) + + /** + * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `ShapeN` output and operands + * @return a new instance of ShapeN + * @see org.tensorflow.op.Ops.shapeN + */ + public fun shapeN(input: Iterable>, outType: Class): + ShapeN = java.shapeN( + input, + outType + ) + + /** + * Returns the size of a tensor. 
+ * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of Size, with default output types + * @see org.tensorflow.op.Ops.size + */ + public fun size(input: Operand): Size = java.size( + input + ) + + /** + * Returns the size of a tensor. + * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `Size` output and operands + * @return a new instance of Size + * @see org.tensorflow.op.Ops.size + */ + public fun size(input: Operand, outType: Class): Size = + java.size( + input, + outType + ) + + /** + * Parses a text file and creates a batch of examples. + * + * @param filename The corpus's text file name. + * @param batchSize The size of produced batch. + * @param options carries optional attribute values + * @return a new instance of Skipgram + * @see org.tensorflow.op.Ops.skipgram + * @param windowSize Sets the windowSize option. + * + * @param windowSize The number of words to predict to the left and right of the target. + * @return this Options instance. + * @param minCount Sets the minCount option. + * + * @param minCount The minimum number of word occurrences for it to be included in the + * vocabulary. + * @return this Options instance. + * @param subsample Sets the subsample option. + * + * @param subsample Threshold for word occurrence. Words that appear with higher + * frequency will be randomly down-sampled. Set to 0 to disable. + * @return this Options instance. 
+ */ + public fun skipgram( + filename: String, + batchSize: Long, + windowSize: Long? = null, + minCount: Long? = null, + subsample: Float? = null + ): Skipgram = java.skipgram( + filename, + batchSize, + *listOfNotNull( + windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } + ).toTypedArray() + ) + + /** + * Return a slice from 'input'. + * The output tensor is a tensor with dimensions described by 'size' + * whose values are extracted from 'input' starting at the offsets in + * 'begin'. + * + * _Requirements_: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * + * @param data type for `output` output + * @param input The input value + * @param begin begin[i] specifies the offset into the 'i'th dimension of + * 'input' to slice from. + * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension + * of 'input' to slice. If size[i] is -1, all remaining elements in dimension + * i are included in the slice (i.e. this is equivalent to setting + * size[i] = input.dim_size(i) - begin[i]). + * @param data type for `Slice` output and operands + * @param data type for `Slice` output and operands + * @return a new instance of Slice + * @see org.tensorflow.op.Ops.slice + */ + public fun slice( + input: Operand, + begin: Operand, + sizeOutput: Operand + ): Slice = java.slice( + input, + begin, + sizeOutput + ) + + /** + * Returns a copy of the input tensor. + * + * @param data type for `output` output + * @param input The input value + * @param data type for `Snapshot` output and operands + * @return a new instance of Snapshot + * @see org.tensorflow.op.Ops.snapshot + */ + public fun snapshot(input: Operand): Snapshot = java.snapshot( + input + ) + + /** + * SpaceToBatch for N-D tensors of type T. 
+ * This operation divides "spatial" dimensions `[1, ..., M]` of the input + * into a + * grid of blocks of shape `block_shape`, and interleaves these blocks with the + * "batch" dimension (0) such that in the output, the spatial dimensions + * `[1, ..., M]` correspond to the position within the grid, and the batch + * dimension combines both the position within a spatial block and the original + * batch position. Prior to division into blocks, the spatial dimensions of the + * input are optionally zero padded according to `paddings`. See below for a + * precise description. + * + * This operation is equivalent to the following steps: + *
    + *
  1. + * + * Zero-pad the start and end of dimensions `[1, ..., M]` of the + * input according to `paddings` to produce `padded` of shape `padded_shape`. + *
  2. + *
  3. + * + * Reshape `padded` to `reshaped_padded` of shape: + * + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + + * remaining_shape + *
  4. + *
  5. + * + * Permute dimensions of `reshaped_padded` to produce + * `permuted_reshaped_padded` of shape: + * + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *
  6. + *
  7. + * + * Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + * dimension, producing an output tensor of shape: + * + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *
  8. + *
+ * + * Some examples: + * + * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * + * ``` + * + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ``` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * + * ``` + * + * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ``` + * + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ``` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * + * ``` + * + * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ``` + * + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * + * ``` + * + * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, + * and + * paddings = `[[0, 0], [2, 0]]`: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ``` + * + * The output tensor has shape `[8, 1, 3, 1]` and value: + * ``` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * + * ``` + * + * Among others, this operation is useful for reducing atrous convolution into + * regular convolution. + * + * @param data type for `output` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + + * remaining_shape`, + * where spatial_shape has `M` dimensions. 
+ * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + * @param data type for `SpaceToBatchND` output and operands + * @return a new instance of SpaceToBatchNd + * @see org.tensorflow.op.Ops.spaceToBatchNd + */ + public fun spaceToBatchNd( + input: Operand, + blockShape: Operand, + paddings: Operand + ): SpaceToBatchNd = java.spaceToBatchNd( + input, + blockShape, + paddings + ) + + /** + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param data type for `output` output + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param value The tensor to split. + * @param numSplit The number of ways to split. Must evenly divide + * `value.shape[split_dim]`. + * @param data type for `Split` output and operands + * @return a new instance of Split + * @see org.tensorflow.op.Ops.split + */ + public fun split( + axis: Operand, + value: Operand, + numSplit: Long + ): Split = java.split( + axis, + value, + numSplit + ) + + /** + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param data type for `output` output + * @param value The tensor to split. + * @param sizeSplits list containing the sizes of each output tensor along the split + * dimension. Must sum to the dimension of value along split_dim. + * Can contain one -1 indicating that dimension is to be inferred. + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. 
+ * @param numSplit The value of the numSplit attribute + * @param data type for `SplitV` output and operands + * @return a new instance of SplitV + * @see org.tensorflow.op.Ops.splitV + */ + public fun splitV( + value: Operand, + sizeSplits: Operand, + axis: Operand, + numSplit: Long + ): SplitV = java.splitV( + value, + sizeSplits, + axis, + numSplit + ) + + /** + * Removes dimensions of size 1 from the shape of a tensor. + * Given a tensor `input`, this operation returns a tensor of the same type with + * all dimensions of size 1 removed. If you don't want to remove all size 1 + * dimensions, you can remove specific size 1 dimensions by specifying + * `axis`. + * + * For example: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t)) ==> [2, 3] + * + * ``` + * + * Or, to remove specific size 1 dimensions: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + * + * ``` + * + * @param data type for `output` output + * @param input The `input` to squeeze. + * @param options carries optional attribute values + * @param data type for `Squeeze` output and operands + * @return a new instance of Squeeze + * @see org.tensorflow.op.Ops.squeeze + * @param axis Sets the axis option. + * + * @param axis If specified, only squeezes the dimensions listed. The dimension + * index starts at 0. It is an error to squeeze a dimension that is not 1. Must + * be in the range `[-rank(input), rank(input))`. + * @return this Options instance. + */ + public fun squeeze(input: Operand, axis: List? = null): Squeeze = + java.squeeze( + input, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() + ) + + /** + * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. + * Packs the `N` tensors in `values` into a tensor with rank one higher than each + * tensor in `values`, by packing them along the `axis` dimension. 
+ * Given a list of tensors of shape `(A, B, C)`; + * + * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + * Etc. + * + * For example: + * ``` + * # 'x' is [1, 4] + * # 'y' is [2, 5] + * # 'z' is [3, 6] + * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + * + * ``` + * + * This is the opposite of `unpack`. + * + * @param data type for `output` output + * @param values Must be of same shape and type. + * @param options carries optional attribute values + * @param data type for `Pack` output and operands + * @return a new instance of Stack + * @see org.tensorflow.op.Ops.stack + * @param axis Sets the axis option. + * + * @param axis Dimension along which to pack. Negative values wrap around, so the + * valid range is `[-(R+1), R+1)`. + * @return this Options instance. + */ + public fun stack(values: Iterable>, axis: Long? = null): Stack = + java.stack( + values, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() + ) + + /** + * Stage values similar to a lightweight Enqueue. + * The basic functionality of this Op is similar to a queue with many + * fewer capabilities and options. This Op is optimized for performance. + * + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param options carries optional attribute values + * @return a new instance of Stage + * @see org.tensorflow.op.Ops.stage + * @param capacity Sets the capacity option. + * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit The maximum number of bytes allowed for Tensors in the Staging Area. 
+ * If > 0, inserts will block until sufficient space is available. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. + */ + public fun stage( + values: Iterable>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Stage = java.stage( + values, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op removes all elements in the underlying container. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of StageClear + * @see org.tensorflow.op.Ops.stageClear + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun stageClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): StageClear = java.stageClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op peeks at the values at the specified index. If the + * underlying container does not contain sufficient elements + * this op will block until it does. This Op is optimized for + * performance. + * + * @param index The index value + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of StagePeek + * @see org.tensorflow.op.Ops.stagePeek + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun stagePeek( + index: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StagePeek = java.stagePeek( + index, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } + ).toTypedArray() + ) + + /** + * Op returns the number of elements in the underlying container. 
+ * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of StageSize + * @see org.tensorflow.op.Ops.stageSize + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun stageSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StageSize = java.stageSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } + ).toTypedArray() + ) + + /** + * An n-way switch statement which calls a single branch function. + * ``` + * An n-way switch statement, implementing the following: + * ``` + * switch (branch_index) { + * case 0: + * output = branches[0](input); + * break; + * case 1: + * output = branches[1](input); + * break; + * ... + * case [[nbranches-1]]: + * default: + * output = branches[nbranches-1](input); + * break; + * + * ``` + * ``` + * } + * + * @param branchIndex The branch selector, an int32 Tensor. + * @param input A list of input tensors passed to the branch function. + * @param Tout A list of output types. + * @param branches ` + * A list of functions each of which takes 'inputs' and returns a list of + * tensors, whose types are the same as what every other branch returns. 
+ * + * ` + * @param options carries optional attribute values + * @return a new instance of StatefulCase + * @see org.tensorflow.op.Ops.statefulCase + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun statefulCase( + branchIndex: Operand, + input: Iterable>, + Tout: List>, + branches: List, + outputShapes: List? = null + ): StatefulCase = java.statefulCase( + branchIndex, + input, + Tout, + branches, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.Case.outputShapes(it) } + ).toTypedArray() + ) + + /** + * output = cond ? then_branch(input) : else_branch(input) + * + * @param cond ` + * A Tensor. If the tensor is a scalar of non-boolean type, the + * scalar is converted to a boolean according to the + * following rule: if the scalar is a numerical value, non-zero means + * `True` and zero means False; if the scalar is a string, non-empty + * means `True` and empty means `False`. If the tensor is not a scalar, + * being empty means False and being non-empty means True. + * + * ` + * @param input A list of input tensors. + * @param Tout A list of output types. + * @param thenBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what else_branch returns. + * + * ` + * @param elseBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what then_branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of StatefulIf + * @see org.tensorflow.op.Ops.statefulIf + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun statefulIf( + cond: Operand, + input: Iterable>, + Tout: List>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + outputShapes: List? 
= null + ): StatefulIf = java.statefulIf( + cond, + input, + Tout, + thenBranch, + elseBranch, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) } + ).toTypedArray() + ) + + /** + * returns `f(inputs)`, where `f`'s body is placed and partitioned. + * + * @param args A list of input tensors. + * @param Tout A list of output types. + * @param f ` + * A function that takes 'args', a list of tensors, and returns 'output', + * another list of tensors. Input and output types are specified by 'Tin' + * and 'Tout'. The function body of f will be placed and partitioned across + * devices, setting this op apart from the regular Call op. This op is + * stateful. + * + * ` + * @param options carries optional attribute values + * @return a new instance of StatefulPartitionedCall + * @see org.tensorflow.op.Ops.statefulPartitionedCall + * @param config Sets the config option. + * + * @param config the config option + * @return this Options instance. + * @param configProto Sets the configProto option. + * + * @param configProto the configProto option + * @return this Options instance. + * @param executorType Sets the executorType option. + * + * @param executorType the executorType option + * @return this Options instance. + */ + public fun statefulPartitionedCall( + args: Iterable>, + Tout: List>, + f: ConcreteFunction, + config: String? = null, + configProto: String? = null, + executorType: String? = null + ): StatefulPartitionedCall = java.statefulPartitionedCall( + args, + Tout, + f, + *listOfNotNull( + config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) }, + configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) }, + executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) } + ).toTypedArray() + ) + + /** + * output = input; While (Cond(output)) { output = Body(output) } + * + * @param input A list of input tensors whose types are T. 
+ * @param cond ` + * A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * + * ` + * @param body ` + * A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified + * by T. + * + * ` + * @param options carries optional attribute values + * @return a new instance of StatefulWhile + * @see org.tensorflow.op.Ops.statefulWhile + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + * @param parallelIterations Sets the parallelIterations option. + * + * @param parallelIterations the parallelIterations option + * @return this Options instance. + */ + public fun statefulWhile( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction, + outputShapes: List? = null, + parallelIterations: Long? = null + ): StatefulWhile = java.statefulWhile( + input, + cond, + body, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) }, + parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) } + ).toTypedArray() + ) + + /** + * output = cond ? then_branch(input) : else_branch(input) + * + * @param cond ` + * A Tensor. If the tensor is a scalar of non-boolean type, the + * scalar is converted to a boolean according to the + * following rule: if the scalar is a numerical value, non-zero means + * `True` and zero means False; if the scalar is a string, non-empty + * means `True` and empty means `False`. If the tensor is not a scalar, + * being empty means False and being non-empty means True. 
+ * + * This should only be used when the if then/else body functions do not + * have stateful ops. + * + * ` + * @param input A list of input tensors. + * @param Tout A list of output types. + * @param thenBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what else_branch returns. + * + * ` + * @param elseBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what then_branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of StatelessIf + * @see org.tensorflow.op.Ops.statelessIf + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun statelessIf( + cond: Operand, + input: Iterable>, + Tout: List>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + outputShapes: List? = null + ): StatelessIf = java.statelessIf( + cond, + input, + Tout, + thenBranch, + elseBranch, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) } + ).toTypedArray() + ) + + /** + * returns `f(inputs)`, where `f`'s body is placed and partitioned. + * Asynchronously executes a function, potentially across multiple devices but + * within a single process. The kernel places and partitions a given function's + * underlying graph, and executes each of the partitioned subgraphs as a function. + * + * @param args A list of input tensors. + * @param Tout A list of output types. + * @param f ` + * A function that takes 'args', a list of tensors, and returns 'output', + * another list of tensors. Input and output types are specified by 'Tin' + * and 'Tout'. The function body of f will be placed and partitioned across + * devices, setting this op apart from the regular Call op. 
+ * + * ` + * @param options carries optional attribute values + * @return a new instance of StatelessPartitionedCall + * @see org.tensorflow.op.Ops.statelessPartitionedCall + * @param config Sets the config option. + * + * @param config the config option + * @return this Options instance. + * @param configProto Sets the configProto option. + * + * @param configProto the configProto option + * @return this Options instance. + * @param executorType Sets the executorType option. + * + * @param executorType the executorType option + * @return this Options instance. + */ + public fun statelessPartitionedCall( + args: Iterable>, + Tout: List>, + f: ConcreteFunction, + config: String? = null, + configProto: String? = null, + executorType: String? = null + ): StatelessPartitionedCall = java.statelessPartitionedCall( + args, + Tout, + f, + *listOfNotNull( + config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) }, + configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) }, + executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) } + ).toTypedArray() + ) + + /** + * output = input; While (Cond(output)) { output = Body(output) } + * + * @param input A list of input tensors whose types are T. + * @param cond ` + * A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * + * This should only be used when the while condition and body functions + * do not have stateful ops. + * + * ` + * @param body ` + * A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified + * by T. 
+ * + * ` + * @param options carries optional attribute values + * @return a new instance of StatelessWhile + * @see org.tensorflow.op.Ops.statelessWhile + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + * @param parallelIterations Sets the parallelIterations option. + * + * @param parallelIterations the parallelIterations option + * @return this Options instance. + */ + public fun statelessWhile( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction, + outputShapes: List? = null, + parallelIterations: Long? = null + ): StatelessWhile = java.statelessWhile( + input, + cond, + body, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) }, + parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) } + ).toTypedArray() + ) + + /** + * Stops gradient computation. + * When executed in a graph, this op outputs its input tensor as-is. + * + * When building ops to compute gradients, this op prevents the contribution of + * its inputs to be taken into account. Normally, the gradient generator adds ops + * to a graph to compute the derivatives of a specified 'loss' by recursively + * finding out inputs that contributed to its computation. If you insert this op + * in the graph it inputs are masked from the gradient generator. They are not + * taken into account for computing gradients. + * + * This is useful any time you want to compute a value with TensorFlow but need + * to pretend that the value was a constant. For example, the softmax function + * for a vector x can be written as + * ``` + * def softmax(x): + * numerator = tf.exp(x) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * This however is susceptible to overflow if the values in x are large. An + * alternative more stable way is to subtract the maximum of x from each of the + * values. 
+ * ``` + * def stable_softmax(x): + * z = x - tf.reduce_max(x) + * numerator = tf.exp(z) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * However, when we backprop through the softmax to x, we dont want to backprop + * through the `tf.reduce_max(x)` (if the max values are not unique then the + * gradient could flow to the wrong input) calculation and treat that as a + * constant. Therefore, we should write this out as + * ``` + * def stable_softmax(x): + * z = x - tf.stop_gradient(tf.reduce_max(x)) + * numerator = tf.exp(z) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * Some other examples include: + *
    + *
  • The _EM_ algorithm where the _M-step_ should not involve backpropagation + * through the output of the _E-step_.
  • + *
  • Contrastive divergence training of Boltzmann machines where, when + * differentiating the energy function, the training must not backpropagate + * through the graph that generated the samples from the model.
  • + *
  • Adversarial training, where no backprop should happen through the adversarial + * example generation process.
  • + *
+ * + * @param data type for `output` output + * @param input The input value + * @param data type for `StopGradient` output and operands + * @return a new instance of StopGradient + * @see org.tensorflow.op.Ops.stopGradient + */ + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + input + ) + + /** + * Return a strided slice from `input`. + * + * + * The goal of this op is to produce a new tensor with a subset of the elements from the `n` + * dimensional `input` + * tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded + * into the arguments of this + * function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each + * range specification + * entry can be one of the following: + * + * + * - An ellipsis (...) using [Indices.ellipsis]. Ellipses are used to imply zero or more + * dimensions of + * full-dimension selection. For example, `stridedSlice(foo, Indices.ellipsis()` is the + * identity slice. + * + * + * - A new axis using [Indices.newAxis]. This is used to insert a new shape=1 dimension. + * For example, ``stridedSlice(foo, Indices.newAxis())` where `foo` is shape `(3, 4)` + * produces a `(1, 3, 4)` tensor. + * + * + * - A range `begin:end:stride` using [Long,][Indices.slice] Index.slice()} or [Indices.all]. + * This is used to specify + * how much to choose from a given dimension. `stride` can be any integer but 0. `begin` is an + * integer which + * represents the index of the first value to select while `end` represents the index of the + * last value to select + * (exclusive). Begin and end can be null, in which case the index begins or ends at the + * beginning or end of the dimension, + * respectively (reversed if stride is negative). When both are null, `slice()` is the same as + * `all()`. + * The number of values selected in each dimension is `end - begin` if `stride > 0` and + * `begin - end` + * if `stride < 0`. 
`begin` and `end` can be negative where `-1` is the last element, `-2` + * is the second to last. For example, given a shape `(3,)` tensor `stridedSlice(foo, + * Indices.all())`, the + * effective `begin` and `end` are `0` and `3`. Do not assume this is equivalent to + * `stridedSlice(foo, Indices.slice(0, -1))` which has an effective `begin` and `end` of `0` + * and + * `2`. Another example is `stridedSlice(foo, Indices.slice(-2, null, -1))` which reverses the + * first dimension + * of a tensor while dropping the last two (in the original order elements). For example ``` + * foo = [1,2,3,4]; + * stridedSlice(foo, Indices.slice(-2, null, -1) + * ``` is `[4,3]`. + * + * + * - A single index using [Indices.at]. This is used to keep only elements that have a given + * index. For + * example (`stridedSlice(foo, Indices.at(2))` on a shape `(5,6)` tensor produces a shape + * `(6,)` tensor. + * The dimension can be kept with size one using [boolean)][Indices.at]. + * + * + * These semantics generally follow NumPy's indexing semantics, which can be found + * here:[https://numpy.org/doc/stable/reference/arrays.indexing.html](https://numpy.org/doc/stable/reference/arrays.indexing.html) + * + * + * + * _Requirements_: + * `0 != strides[i] for i in [0, m)` Only one ellipsis. + * + * @param data type for `output()` output + * @param indices The indices to slice. See [Indices]. + * @return a new instance of StridedSlice + * @see Indices + * @see org.tensorflow.op.Ops.stridedSlice + */ + public fun stridedSlice(input: Operand, vararg indices: Index): StridedSlice = + java.stridedSlice( + input, + *indices + ) + + /** + * Return a strided slice from `input`. + * Note, most python users will want to use the Python `Tensor.__getitem__` + * or `Variable.__getitem__` rather than this op directly. + * + * The goal of this op is to produce a new tensor with a subset of + * the elements from the `n` dimensional `input` tensor. 
The subset is chosen using + * a sequence of `m` sparse range specifications encoded into the arguments + * of this function. Note, in some cases + * `m` could be equal to `n`, but this need not be the case. Each + * range specification entry can be one of the following: + *
    + *
  • + * + * An ellipsis (...). Ellipses are used to imply zero or more + * dimensions of full-dimension selection and are produced using + * `ellipsis_mask`. For example, `foo[...]` is the identity slice. + *
  • + *
  • + * + * A new axis. This is used to insert a new shape=1 dimension and is + * produced using `new_axis_mask`. For example, `foo[:, ...]` where + * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + *
  • + *
  • + * + * A range `begin:end:stride`. This is used to specify how much to choose from + * a given dimension. `stride` can be any integer but 0. `begin` is an integer + * which represents the index of the first value to select while `end` represents + * the index of the last value to select. The number of values selected in each + * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + * `begin` and `end` can be negative where `-1` is the last element, `-2` is + * the second to last. `begin_mask` controls whether to replace the explicitly + * given `begin` with an implicit effective value of `0` if `stride > 0` and + * `-1` if `stride < 0`. `end_mask` is analogous but produces the number + * required to create the largest open interval. For example, given a shape + * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + * first dimension of a tensor while dropping the last two (in the original + * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is + * `[4,3]`. + *
  • + *
  • + * + * A single index. This is used to keep only elements that have a given + * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + * shape `(6,)` tensor. This is encoded in `begin` and `end` and + * `shrink_axis_mask`. + *
  • + *
+ * + * Each conceptual range specification is encoded in the op's argument. This + * encoding is best understand by considering a non-trivial example. In + * particular, + * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + * ``` + * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + * end = [2, 4, x, x, -3, x] + * strides = [1, 1, x, x, -1, 1] + * begin_mask = 1<<4 | 1<<5 = 48 + * end_mask = 1<<5 = 32 + * ellipsis_mask = 1<<3 = 8 + * new_axis_mask = 1<<2 = 4 + * shrink_axis_mask = 1<<0 = 1 + * + * ``` + * + * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + * the slice becomes (2, 1, 5, 5, 2, 5). + * Let us walk step by step through each argument specification. + *
    + *
  1. + * + * The first argument in the example slice is turned into `begin = 1` and + * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + * also set the appropriate bit in `shrink_axis_mask`. + *
  2. + *
  3. + * + * `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + * zero bits contributed. + *
  4. + *
  5. + * + * None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + * dimension in the final shape. Dummy values are contributed to begin, + * end and stride, while the new_axis_mask bit is set. + *
  6. + *
  7. + * + * `...` grab the full ranges from as many dimensions as needed to + * fully specify a slice for every dimension of the input shape. + *
  8. + *
  9. + * + * `:-3:-1` shows the use of negative indices. A negative index `i` associated + * with a dimension that has shape `s` is converted to a positive index + * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + * is done internally so begin, end and strides receive x, -3, and -1. + * The appropriate begin_mask bit is set to indicate the start range is the + * full range (ignoring the x). + *
  10. + *
  11. + * + * `:` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + * `end_mask` are also set. + *
  12. + *
+ * + * _Requirements_: + * `0 != strides[i] for i in [0, m)` + * `ellipsis_mask must be a power of two (only one ellipsis)` + * + * @param data type for `output` output + * @param input The input value + * @param begin `begin[k]` specifies the offset into the `k`th range specification. + * The exact dimension this corresponds to will be determined by context. + * Out-of-bounds values will be silently clamped. If the `k`th bit of + * `begin_mask` then `begin[k]` is ignored and the full range of the + * appropriate dimension is used instead. Negative values causes indexing + * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. + * @param end `end[i]` is like `begin` with the exception that `end_mask` is + * used to determine full ranges. + * @param strides `strides[i]` specifies the increment in the `i`th specification + * after extracting a given element. Negative indices will reverse + * the original order. Out or range values are + * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` + * @param options carries optional attribute values + * @param data type for `StridedSlice` output and operands + * @param data type for `StridedSlice` output and operands + * @return a new instance of StridedSlice + * @see org.tensorflow.op.Ops.stridedSlice + * @param beginMask Sets the beginMask option. + * + * @param beginMask a bitmask where a bit i being 1 means to ignore the begin + * value and instead use the largest interval possible. At runtime + * begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or + * `[-1, n-1]` if `stride[i] < 0` + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask analogous to `begin_mask` + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask a bitmask where bit `i` being 1 means the `i`th + * position is actually an ellipsis. One bit at most can be 1. 
+ * If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` + * is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + * implicitly creates as many range specifications as necessary to fully + * specify the sliced range for every dimension. For example for a 4-dimensional + * tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask a bitmask where bit `i` being 1 means the `i`th + * specification creates a new shape 1 dimension. For example + * `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask a bitmask where bit `i` implies that the `i`th + * specification should shrink the dimensionality. begin and end + * must imply a slice of size 1 in the dimension. For example in + * python one might do `foo[:, 3, :]` which would result in + * `shrink_axis_mask` being 2. + * @return this Options instance. + */ + public fun stridedSlice( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSlice = java.stridedSlice( + input, + begin, + end, + strides, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + ).toTypedArray() + ) + + /** + * Assign `value` to the sliced l-value reference of `ref`. 
+ * + * + * The values of `value` are assigned to the positions in the variable `ref` that are selected + * by the slice + * parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in + * `StridedSlice`. + * + * + * NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly + * the shape produced by + * the slice of `ref`. + * + * @param data type for `outputRef()` output + * @param ref the tensor to assign to. + * @param value the value to assign. + * @param indices The indices to slice. See [Indices]. + * @return a new instance of StridedSliceAssign + * @see org.tensorflow.op.Ops.stridedSlice + * @see org.tensorflow.op.Ops.stridedSliceAssign + */ + public fun stridedSliceAssign( + ref: Operand, + value: Operand, + vararg indices: Index + ): StridedSliceAssign = java.stridedSliceAssign( + ref, + value, + *indices + ) + + /** + * Assign `value` to the sliced l-value reference of `ref`. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. + * + * @param data type for `output_ref` output + * @param ref The ref value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value + * @param options carries optional attribute values + * @param data type for `StridedSliceAssign` output and operands + * @param data type for `StridedSliceAssign` output and operands + * @return a new instance of StridedSliceAssign + * @see org.tensorflow.op.Ops.stridedSliceAssign + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. 
+ * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. + */ + public fun stridedSliceAssign( + ref: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSliceAssign = java.stridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + /** + * Returns the gradient of `StridedSlice`. + * Since `StridedSlice` cuts out pieces of its `input` which is size + * `shape`, its gradient will have the same shape (which is passed here + * as `shape`). The gradient will be zero in any element that the slice + * does not select. + * + * Arguments are the same as StridedSliceGrad with the exception that + * `dy` is the input gradient to be propagated and `shape` is the + * shape of `StridedSlice`'s `input`. 
+ * + * @param data type for `output` output + * @param shape The shape value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param dy The dy value + * @param options carries optional attribute values + * @param data type for `StridedSliceGrad` output and operands + * @param data type for `StridedSliceGrad` output and operands + * @return a new instance of StridedSliceGrad + * @see org.tensorflow.op.Ops.stridedSliceGrad + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. + */ + public fun stridedSliceGrad( + shape: Operand, + begin: Operand, + end: Operand, + strides: Operand, + dy: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): StridedSliceGrad = java.stridedSliceGrad( + shape, + begin, + end, + strides, + dy, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + ).toTypedArray() + ) + + /** + * Computes the sum of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Sum` output and operands + * @return a new instance of Sum + * @see org.tensorflow.op.Ops.sum + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun sum( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Sum = java.sum( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } + ).toTypedArray() + ) + + /** + * Forwards `data` to the output port determined by `pred`. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. + * + * See also `RefSwitch` and `Merge`. + * + * @param data type for `output_false` output + * @param data The tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. 
+ * @param data type for `Switch` output and operands + * @return a new instance of SwitchCond + * @see org.tensorflow.op.Ops.switchCond + */ + public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = + java.switchCond( + data, + pred + ) + + /** + * Returns a tensor that may be mutated, but only persists within a single step. + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. + * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param data type for `ref` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attribute values + * @param data type for `TemporaryVariable` output and operands + * @return a new instance of TemporaryVariable + * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Sets the varName option. + * + * @param varName Overrides the name used for the temporary variable resource. Default + * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + * @return this Options instance. + */ + public fun temporaryVariable( + shape: Shape, + dtype: Class, + varName: String? = null + ): TemporaryVariable = java.temporaryVariable( + shape, + dtype, + *listOfNotNull( + varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } + ).toTypedArray() + ) + + /** + * An array of Tensors of given size. 
+ * Write data via Write and read via Read or Pack. + * + * @param sizeOutput The size of the array. + * @param dtype The type of the elements on the tensor_array. + * @param options carries optional attribute values + * @param data type for `TensorArrayV3` output and operands + * @return a new instance of TensorArray + * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape Sets the elementShape option. + * + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. + * @param dynamicSize Sets the dynamicSize option. + * + * @param dynamicSize A boolean that determines whether writes to the TensorArray + * are allowed to grow the size. By default, this is not allowed. + * @return this Options instance. + * @param clearAfterRead Sets the clearAfterRead option. + * + * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared + * after being read. This disables multiple read semantics but allows early + * release of memory. + * @return this Options instance. + * @param identicalElementShapes Sets the identicalElementShapes option. + * + * @param identicalElementShapes If true (default is false), then all + * elements in the TensorArray will be expected to have identical shapes. + * This allows certain behaviors, like dynamically checking for + * consistent shapes on write, and being able to fill in properly + * shaped zero tensors on stack -- even if the element_shape attribute + * is not fully defined. + * @return this Options instance. + * @param tensorArrayName Sets the tensorArrayName option. + * + * @param tensorArrayName Overrides the name used for the temporary tensor_array + * resource. Default value is the name of the 'TensorArray' op (which + * is guaranteed unique). + * @return this Options instance. 
+ */ + public fun tensorArray( + sizeOutput: Operand, + dtype: Class, + elementShape: Shape? = null, + dynamicSize: Boolean? = null, + clearAfterRead: Boolean? = null, + identicalElementShapes: Boolean? = null, + tensorArrayName: String? = null + ): TensorArray = java.tensorArray( + sizeOutput, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + ).toTypedArray() + ) + + /** + * Delete the TensorArray from its resource container. + * This enables the user to close and release the resource in the middle + * of a step/run. + * + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @return a new instance of TensorArrayClose + * @see org.tensorflow.op.Ops.tensorArrayClose + */ + public fun tensorArrayClose(handle: Operand): TensorArrayClose = + java.tensorArrayClose( + handle + ) + + /** + * Concat the elements from the TensorArray into value `value`. + * Takes `T` elements of shapes + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * + * ``` + * + * and concatenates them into a Tensor of shape: + * + * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` + * + * All elements must have the same shape (excepting the first dimension). + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. 
+ * @param options carries optional attribute values + * @param data type for `TensorArrayConcatV3` output and operands + * @return a new instance of TensorArrayConcat + * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 Sets the elementShapeExcept0 option. + * + * @param elementShapeExcept0 The expected shape of an element, if known, + * excluding the first dimension. Used to validate the shapes of + * TensorArray elements. If this shape is not fully specified, concatenating + * zero-size TensorArrays is an error. + * @return this Options instance. + */ + public fun tensorArrayConcat( + handle: Operand, + flowIn: Operand, + dtype: Class, + elementShapeExcept0: Shape? = null + ): TensorArrayConcat = java.tensorArrayConcat( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + ).toTypedArray() + ) + + /** + * Gather specific elements from the TensorArray into output `value`. + * All elements selected by `indices` must have the same shape. + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attribute values + * @param data type for `TensorArrayGatherV3` output and operands + * @return a new instance of TensorArrayGather + * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape Sets the elementShape option. + * + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. 
+ */ + public fun tensorArrayGather( + handle: Operand, + indices: Operand, + flowIn: Operand, + dtype: Class, + elementShape: Shape? = null + ): TensorArrayGather = java.tensorArrayGather( + handle, + indices, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + ).toTypedArray() + ) + + /** + * Creates a TensorArray for storing the gradients of values in the given handle. + * If the given TensorArray gradient already exists, returns a reference to it. + * + * Locks the size of the original TensorArray by disabling its dynamic size flag. + * + * **A note about the input flow_in:** + * + * The handle flow_in forces the execution of the gradient lookup to occur + * only after certain other operations have occurred. For example, when + * the forward TensorArray is dynamically sized, writes to this TensorArray + * may resize the object. The gradient TensorArray is statically sized based + * on the size of the forward TensorArray when this operation executes. + * Furthermore, the size of the forward TensorArray is frozen by this call. + * As a result, the flow is used to ensure that the call to generate the gradient + * TensorArray only happens after all writes are executed. + * + * In the case of dynamically sized TensorArrays, gradient computation should + * only be performed on read operations that have themselves been chained via + * flow to occur only after all writes have executed. That way the final size + * of the forward TensorArray is known when this operation is called. + * + * **A note about the source attribute:** + * + * TensorArray gradient calls use an accumulator TensorArray object. If + * multiple gradients are calculated and run in the same session, the multiple + * gradient nodes may accidentally flow through the same accumulator TensorArray. + * This double counts and generally breaks the TensorArray gradient flow. 
+ * + * The solution is to identify which gradient call this particular + * TensorArray gradient is being called in. This is performed by identifying + * a unique string (e.g. "gradients", "gradients_1", ...) from the input + * gradient Tensor's name. This string is used as a suffix when creating + * the TensorArray gradient object here (the attribute `source`). + * + * The attribute `source` is added as a suffix to the forward TensorArray's + * name when performing the creation / lookup, so that each separate gradient + * calculation gets its own TensorArray accumulator. + * + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGrad + * @see org.tensorflow.op.Ops.tensorArrayGrad + */ + public fun tensorArrayGrad( + handle: Operand, + flowIn: Operand, + source: String + ): TensorArrayGrad = java.tensorArrayGrad( + handle, + flowIn, + source + ) + + /** + * Creates a TensorArray for storing multiple gradients of values in the given handle. + * Similar to TensorArrayGradV3. However it creates an accumulator with an + * expanded shape compared to the input TensorArray whose gradient is being + * computed. This enables multiple gradients for the same TensorArray to be + * calculated using the same accumulator. + * + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient + * accumulator will + * have shape which is this shape_to_prepend value concatenated with shape of the + * elements in the TensorArray corresponding to the input handle. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. 
+ * @return a new instance of TensorArrayGradWithShape + * @see org.tensorflow.op.Ops.tensorArrayGradWithShape + */ + public fun tensorArrayGradWithShape( + handle: Operand, + flowIn: Operand, + shapeToPrepend: Operand, + source: String + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + handle, + flowIn, + shapeToPrepend, + source + ) + + /** + * The TensorArrayPack operation + * + * @param data type for `value` output + * @param handle The handle value + * @param flowIn The flowIn value + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `TensorArrayPack` output and operands + * @return a new instance of TensorArrayPack + * @see org.tensorflow.op.Ops.tensorArrayPack + * @param elementShape Sets the elementShape option. + * + * @param elementShape the elementShape option + * @return this Options instance. + */ + public fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + dtype: Class, + elementShape: Shape? = null + ): TensorArrayPack = java.tensorArrayPack( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + ).toTypedArray() + ) + + /** + * Read an element from the TensorArray into output `value`. + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param index The index value + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param data type for `TensorArrayReadV3` output and operands + * @return a new instance of TensorArrayRead + * @see org.tensorflow.op.Ops.tensorArrayRead + */ + public fun tensorArrayRead( + handle: Operand, + index: Operand, + flowIn: Operand, + dtype: Class + ): TensorArrayRead = java.tensorArrayRead( + handle, + index, + flowIn, + dtype + ) + + /** + * Scatter the data from the input value into specific TensorArray elements. 
+ * `indices` must be a vector, its length must match the first dim of `value`. + * + * @param handle The handle to a TensorArray. + * @param indices The locations at which to write the tensor elements. + * @param value The concatenated tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayScatter + * @see org.tensorflow.op.Ops.tensorArrayScatter + */ + public fun tensorArrayScatter( + handle: Operand, + indices: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayScatter = java.tensorArrayScatter( + handle, + indices, + value, + flowIn + ) + + /** + * Get the current size of the TensorArray. + * + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySize + * @see org.tensorflow.op.Ops.tensorArraySize + */ + public fun tensorArraySize(handle: Operand, flowIn: Operand): + TensorArraySize = java.tensorArraySize( + handle, + flowIn + ) + + /** + * Split the data from the input value into TensorArray elements. + * Assuming that `lengths` takes on values + * + * `(n0, n1, ..., n(T-1))` + * + * and that `value` has shape + * + * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)`, + * + * this splits values into a TensorArray with T tensors. + * + * TensorArray index t will be the subtensor of values with starting position + * + * `(n0 + n1 + ... + n(t-1), 0, 0, ...)` + * + * and having size + * + * `nt x d0 x d1 x ...` + * + * @param handle The handle to a TensorArray. + * @param value The concatenated tensor to write to the TensorArray. + * @param lengths The vector of lengths, how to split the rows of value into the + * TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. 
+ * @return a new instance of TensorArraySplit + * @see org.tensorflow.op.Ops.tensorArraySplit + */ + public fun tensorArraySplit( + handle: Operand, + value: Operand, + lengths: Operand, + flowIn: Operand + ): TensorArraySplit = java.tensorArraySplit( + handle, + value, + lengths, + flowIn + ) + + /** + * The TensorArrayUnpack operation + * + * @param handle The handle value + * @param value The value value + * @param flowIn The flowIn value + * @return a new instance of TensorArrayUnpack + * @see org.tensorflow.op.Ops.tensorArrayUnpack + */ + public fun tensorArrayUnpack( + handle: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayUnpack = java.tensorArrayUnpack( + handle, + value, + flowIn + ) + + /** + * Push an element onto the tensor_array. + * + * @param handle The handle to a TensorArray. + * @param index The position to write to inside the TensorArray. + * @param value The tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayWrite + * @see org.tensorflow.op.Ops.tensorArrayWrite + */ + public fun tensorArrayWrite( + handle: Operand, + index: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayWrite = java.tensorArrayWrite( + handle, + index, + value, + flowIn + ) + + /** + * Concats all tensors in the list along the 0th dimension. + * Requires that all tensors have the same shape except the first dimension. + * + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. + * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. 
+ * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used + * for computing the gradient. + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param leadingDims The leadingDims value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListConcatV2` output and operands + * @return a new instance of TensorListConcat + * @see org.tensorflow.op.Ops.tensorListConcat + */ + public fun tensorListConcat( + inputHandle: Operand, + elementShape: Operand, + leadingDims: Operand, + elementDtype: Class + ): TensorListConcat = java.tensorListConcat( + inputHandle, + elementShape, + leadingDims, + elementDtype + ) + + /** + * The TensorListConcatLists operation + * + * @param inputA The inputA value + * @param inputB The inputB value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListConcatLists` output and operands + * @return a new instance of TensorListConcatLists + * @see org.tensorflow.op.Ops.tensorListConcatLists + */ + public fun tensorListConcatLists( + inputA: Operand, + inputB: Operand, + elementDtype: Class + ): TensorListConcatLists = java.tensorListConcatLists( + inputA, + inputB, + elementDtype + ) + + /** + * The shape of the elements of the given list, as a tensor. 
+ * input_handle: the list + * element_shape: the shape of elements of the list + * + * @param data type for `element_shape` output + * @param inputHandle The inputHandle value + * @param shapeType The value of the shapeType attribute + * @param data type for `TensorListElementShape` output and operands + * @return a new instance of TensorListElementShape + * @see org.tensorflow.op.Ops.tensorListElementShape + */ + public fun tensorListElementShape(inputHandle: Operand, + shapeType: Class): TensorListElementShape = java.tensorListElementShape( + inputHandle, + shapeType + ) + + /** + * Creates a TensorList which, when stacked, has the value of `tensor`. + * Each tensor in the result list corresponds to one row of the input tensor. + * + * tensor: The input tensor. + * output_handle: The list. + * + * @param tensor The tensor value + * @param elementShape The elementShape value + * @return a new instance of TensorListFromTensor + * @see org.tensorflow.op.Ops.tensorListFromTensor + */ + public fun tensorListFromTensor(tensor: Operand, elementShape: Operand): + TensorListFromTensor = java.tensorListFromTensor( + tensor, + elementShape + ) + + /** + * Creates a Tensor by indexing into the TensorList. + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. + * indices: The indices used to index into the list. + * values: The tensor. 
+ * + * @param data type for `values` output + * @param inputHandle The inputHandle value + * @param indices The indices value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListGather` output and operands + * @return a new instance of TensorListGather + * @see org.tensorflow.op.Ops.tensorListGather + */ + public fun tensorListGather( + inputHandle: Operand, + indices: Operand, + elementShape: Operand, + elementDtype: Class + ): TensorListGather = java.tensorListGather( + inputHandle, + indices, + elementShape, + elementDtype + ) + + /** + * The TensorListGetItem operation + * + * @param data type for `item` output + * @param inputHandle The inputHandle value + * @param index The index value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListGetItem` output and operands + * @return a new instance of TensorListGetItem + * @see org.tensorflow.op.Ops.tensorListGetItem + */ + public fun tensorListGetItem( + inputHandle: Operand, + index: Operand, + elementShape: Operand, + elementDtype: Class + ): TensorListGetItem = java.tensorListGetItem( + inputHandle, + index, + elementShape, + elementDtype + ) + + /** + * Returns the number of tensors in the input tensor list. + * input_handle: the input list + * length: the number of tensors in the list + * + * @param inputHandle The inputHandle value + * @return a new instance of TensorListLength + * @see org.tensorflow.op.Ops.tensorListLength + */ + public fun tensorListLength(inputHandle: Operand): TensorListLength = + java.tensorListLength( + inputHandle + ) + + /** + * Returns the last element of the input list as well as a list with all but that element. + * Fails if the list is empty. 
+ * + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListPopBack` output and operands + * @return a new instance of TensorListPopBack + * @see org.tensorflow.op.Ops.tensorListPopBack + */ + public fun tensorListPopBack( + inputHandle: Operand, + elementShape: Operand, + elementDtype: Class + ): TensorListPopBack = java.tensorListPopBack( + inputHandle, + elementShape, + elementDtype + ) + + /** + * Returns a list which has the passed-in `Tensor` as last element and the other elements of the + * given list in `input_handle`. + * tensor: The tensor to put on the list. + * input_handle: The old list. + * output_handle: A list with the elements of the old list followed by tensor. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param inputHandle The inputHandle value + * @param tensor The tensor value + * @return a new instance of TensorListPushBack + * @see org.tensorflow.op.Ops.tensorListPushBack + */ + public fun tensorListPushBack(inputHandle: Operand, tensor: Operand): + TensorListPushBack = java.tensorListPushBack( + inputHandle, + tensor + ) + + /** + * The TensorListPushBackBatch operation + * + * @param inputHandles The inputHandles value + * @param tensor The tensor value + * @return a new instance of TensorListPushBackBatch + * @see org.tensorflow.op.Ops.tensorListPushBackBatch + */ + public fun tensorListPushBackBatch(inputHandles: Operand, tensor: Operand): TensorListPushBackBatch = java.tensorListPushBackBatch( + inputHandles, + tensor + ) + + /** + * List of the given size with empty elements. 
+ * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. + * + * @param elementShape The elementShape value + * @param numElements The numElements value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListReserve` output and operands + * @return a new instance of TensorListReserve + * @see org.tensorflow.op.Ops.tensorListReserve + */ + public fun tensorListReserve( + elementShape: Operand, + numElements: Operand, + elementDtype: Class + ): TensorListReserve = java.tensorListReserve( + elementShape, + numElements, + elementDtype + ) + + /** + * Resizes the list. + * input_handle: the input list + * size: size of the output list + * + * @param inputHandle The inputHandle value + * @param sizeOutput The sizeOutput value + * @return a new instance of TensorListResize + * @see org.tensorflow.op.Ops.tensorListResize + */ + public fun tensorListResize(inputHandle: Operand, sizeOutput: Operand): + TensorListResize = java.tensorListResize( + inputHandle, + sizeOutput + ) + + /** + * Creates a TensorList by indexing into a Tensor. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + * + * tensor: The input tensor. + * indices: The indices used to index into the list. + * element_shape: The shape of the elements in the list (can be less specified than + * the shape of the tensor). + * num_elements: The size of the output list. Must be large enough to accommodate + * the largest index in indices. If -1, the list is just large enough to include + * the largest index in indices. + * output_handle: The TensorList. 
+ * + * @param tensor The tensor value + * @param indices The indices value + * @param elementShape The elementShape value + * @param numElements The numElements value + * @return a new instance of TensorListScatter + * @see org.tensorflow.op.Ops.tensorListScatter + */ + public fun tensorListScatter( + tensor: Operand, + indices: Operand, + elementShape: Operand, + numElements: Operand + ): TensorListScatter = java.tensorListScatter( + tensor, + indices, + elementShape, + numElements + ) + + /** + * Scatters tensor at indices in an input list. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + * + * input_handle: The list to scatter into. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * output_handle: The TensorList. + * + * @param inputHandle The inputHandle value + * @param tensor The tensor value + * @param indices The indices value + * @return a new instance of TensorListScatterIntoExistingList + * @see org.tensorflow.op.Ops.tensorListScatterIntoExistingList + */ + public fun tensorListScatterIntoExistingList( + inputHandle: Operand, + tensor: Operand, + indices: Operand + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + inputHandle, + tensor, + indices + ) + + /** + * The TensorListSetItem operation + * + * @param inputHandle The inputHandle value + * @param index The index value + * @param item The item value + * @return a new instance of TensorListSetItem + * @see org.tensorflow.op.Ops.tensorListSetItem + */ + public fun tensorListSetItem( + inputHandle: Operand, + index: Operand, + item: Operand + ): TensorListSetItem = java.tensorListSetItem( + inputHandle, + index, + item + ) + + /** + * Splits a tensor into a list. + * list[i] corresponds to lengths[i] tensors from the input tensor. + * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + * + * tensor: The input tensor. 
+ * element_shape: A shape compatible with that of elements in the tensor. + * lengths: Vector of sizes of the 0th dimension of tensors in the list. + * output_handle: The list. + * + * @param tensor The tensor value + * @param elementShape The elementShape value + * @param lengths The lengths value + * @return a new instance of TensorListSplit + * @see org.tensorflow.op.Ops.tensorListSplit + */ + public fun tensorListSplit( + tensor: Operand, + elementShape: Operand, + lengths: Operand + ): TensorListSplit = java.tensorListSplit( + tensor, + elementShape, + lengths + ) + + /** + * Stacks all tensors in the list. + * Requires that all tensors have the same shape. + * + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param options carries optional attribute values + * @param data type for `TensorListStack` output and operands + * @return a new instance of TensorListStack + * @see org.tensorflow.op.Ops.tensorListStack + * @param numElements Sets the numElements option. + * + * @param numElements the numElements option + * @return this Options instance. + */ + public fun tensorListStack( + inputHandle: Operand, + elementShape: Operand, + elementDtype: Class, + numElements: Long? = null + ): TensorListStack = java.tensorListStack( + inputHandle, + elementShape, + elementDtype, + *listOfNotNull( + numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } + ).toTypedArray() + ) + + /** + * Returns a tensor map with item from given key erased. 
+ * input_handle: the original map + * output_handle: the map with value from given key removed + * key: the key of the value to be erased + * + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute + * @param data type for `TensorMapErase` output and operands + * @return a new instance of TensorMapErase + * @see org.tensorflow.op.Ops.tensorMapErase + */ + public fun tensorMapErase( + inputHandle: Operand, + key: Operand, + valueDtype: Class + ): TensorMapErase = java.tensorMapErase( + inputHandle, + key, + valueDtype + ) + + /** + * Returns whether the given key exists in the map. + * input_handle: the input map + * key: the key to check + * has_key: whether the key is already in the map or not + * + * @param inputHandle The inputHandle value + * @param key The key value + * @return a new instance of TensorMapHasKey + * @see org.tensorflow.op.Ops.tensorMapHasKey + */ + public fun tensorMapHasKey(inputHandle: Operand, key: Operand): + TensorMapHasKey = java.tensorMapHasKey( + inputHandle, + key + ) + + /** + * Returns a map that is the 'input_handle' with the given key-value pair inserted. + * input_handle: the original map + * output_handle: the map with key and value inserted + * key: the key to be inserted + * value: the value to be inserted + * + * @param inputHandle The inputHandle value + * @param key The key value + * @param value The value value + * @return a new instance of TensorMapInsert + * @see org.tensorflow.op.Ops.tensorMapInsert + */ + public fun tensorMapInsert( + inputHandle: Operand, + key: Operand, + value: Operand + ): TensorMapInsert = java.tensorMapInsert( + inputHandle, + key, + value + ) + + /** + * Returns the value from a given key in a tensor map. 
+ * input_handle: the input map + * key: the key to be looked up + * value: the value found from the given key + * + * @param data type for `value` output + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute + * @param data type for `TensorMapLookup` output and operands + * @return a new instance of TensorMapLookup + * @see org.tensorflow.op.Ops.tensorMapLookup + */ + public fun tensorMapLookup( + inputHandle: Operand, + key: Operand, + valueDtype: Class + ): TensorMapLookup = java.tensorMapLookup( + inputHandle, + key, + valueDtype + ) + + /** + * Returns the number of tensors in the input tensor map. + * input_handle: the input map + * size: the number of tensors in the map + * + * @param inputHandle The inputHandle value + * @return a new instance of TensorMapSize + * @see org.tensorflow.op.Ops.tensorMapSize + */ + public fun tensorMapSize(inputHandle: Operand): TensorMapSize = java.tensorMapSize( + inputHandle + ) + + /** + * Returns a Tensor stack of all keys in a tensor map. + * input_handle: the input map + * keys: the returned Tensor of all keys in the map + * + * @param data type for `keys` output + * @param inputHandle The inputHandle value + * @param keyDtype The value of the keyDtype attribute + * @param data type for `TensorMapStackKeys` output and operands + * @return a new instance of TensorMapStackKeys + * @see org.tensorflow.op.Ops.tensorMapStackKeys + */ + public fun tensorMapStackKeys(inputHandle: Operand, keyDtype: Class): + TensorMapStackKeys = java.tensorMapStackKeys( + inputHandle, + keyDtype + ) + + /** + * Adds sparse `updates` to an existing tensor according to `indices`. + * This operation creates a new tensor by adding sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates + * are added onto an existing tensor (as opposed to a variable). 
If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `tensor.shape`. The last dimension of `indices` can be at most the rank of + * `tensor.shape`: + * ``` + * indices.shape[-1] <= tensor.shape.rank + * + * ``` + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = tensor.shape.rank`) or slices + * (if `indices.shape[-1] < tensor.shape.rank`) along dimension + * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape + * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * + * ``` + * + * The simplest form of tensor_scatter_add is to add individual elements to a + * tensor by index. For example, say we want to add 4 elements in a rank-1 + * tensor with 8 elements. + * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) + * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + * print(updated) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [1, 12, 1, 11, 10, 1, 1, 13] + * + * ``` + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. 
+ * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + * print(updated) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param data type for `output` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param data type for `TensorScatterAdd` output and operands + * @return a new instance of TensorScatterNdAdd + * @see org.tensorflow.op.Ops.tensorScatterNdAdd + */ + public fun tensorScatterNdAdd( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdAdd = java.tensorScatterNdAdd( + tensor, + indices, + updates + ) + + /** + * The TensorScatterMax operation + * + * @param data type for `output` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. 
+ * @param data type for `TensorScatterMax` output and operands + * @return a new instance of TensorScatterNdMax + * @see org.tensorflow.op.Ops.tensorScatterNdMax + */ + public fun tensorScatterNdMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMax = java.tensorScatterNdMax( + tensor, + indices, + updates + ) + + /** + * The TensorScatterMin operation + * + * @param data type for `output` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param data type for `TensorScatterMin` output and operands + * @return a new instance of TensorScatterNdMin + * @see org.tensorflow.op.Ops.tensorScatterNdMin + */ + public fun tensorScatterNdMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMin = java.tensorScatterNdMin( + tensor, + indices, + updates + ) + + /** + * Subtracts sparse `updates` from an existing tensor according to `indices`. + * This operation creates a new tensor by subtracting sparse `updates` from the + * passed in `tensor`. + * This operation is very similar to `tf.scatter_nd_sub`, except that the updates + * are subtracted from an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * ``` + * indices.shape[-1] <= shape.rank + * + * ``` + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + * ``` + * indices.shape[:-1] + shape[indices.shape[-1]:] + * + * ``` + * + * The simplest form of tensor_scatter_sub is to subtract individual elements + * from a tensor by index. 
For example, say we want to insert 4 scattered elements + * in a rank-1 tensor with 8 elements. + * + * In Python, this scatter subtract operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) + * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + * print(updated) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [1, -10, 1, -9, -8, 1, 1, -11] + * + * ``` + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + * print(updated) + * + * ``` + * + * The resulting tensor would look like this: + * ``` + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param data type for `output` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. 
+ * @param data type for `TensorScatterSub` output and operands + * @return a new instance of TensorScatterNdSub + * @see org.tensorflow.op.Ops.tensorScatterNdSub + */ + public fun tensorScatterNdSub( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdSub = java.tensorScatterNdSub( + tensor, + indices, + updates + ) + + /** + * Scatter `updates` into an existing tensor according to `indices`. + * This operation creates a new tensor by applying sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd`, except that the updates are + * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * If `indices` contains duplicates, then we pick the last update for the index. + * + * If an out of bound index is found on CPU, an error is returned. + * + * **WARNING**: There are some GPU specific semantics for this operation. + *
    + *
  • If an out of bound index is found, the index is ignored.
  • + *
  • The order in which updates are applied is nondeterministic, so the output + * will be nondeterministic if `indices` contains duplicates.
  • + *
+ * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. + *
    + *
  • `indices` must have at least 2 axes: `(num_updates, index_depth)`.
  • + *
  • The last axis of `indices` is how deep to index into `tensor` so this index + * depth must be less than the rank of `tensor`: `indices.shape[-1] <= + * tensor.ndim`
  • + *
+ * + * if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. + * if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input + * `tensor`. + * + * Each `update` has a rank of `tensor.rank - indices.shape[-1]`. + * The overall shape of `updates` is: + * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * + * ``` + * + * For usage examples see the python tf.tensor_scatter_nd_update + * [org.tensorflow.op.Ops.tensorScatterNdUpdate] function + * + * @param data type for `output` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param data type for `TensorScatterUpdate` output and operands + * @return a new instance of TensorScatterNdUpdate + * @see org.tensorflow.op.Ops.tensorScatterNdUpdate + */ + public fun tensorScatterNdUpdate( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + tensor, + indices, + updates + ) + + /** + * Assign `value` to the sliced l-value reference of `input`. + * The values of `value` are assigned to the positions in the tensor `input` that + * are selected by the slice parameters. The slice parameters `begin` `end` + * `strides` etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s shape + * must be exactly the shape produced by the slice of `input`. 
+ * + * @param data type for `output` output + * @param input The input value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value + * @param options carries optional attribute values + * @param data type for `TensorStridedSliceUpdate` output and operands + * @param data type for `TensorStridedSliceUpdate` output and operands + * @return a new instance of TensorStridedSliceUpdate + * @see org.tensorflow.op.Ops.tensorStridedSliceUpdate + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. + */ + public fun tensorStridedSliceUpdate( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + input, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + ).toTypedArray() + ) + + /** + * Constructs a tensor by tiling a given tensor. + * This operation creates a new tensor by replicating `input` `multiples` times. + * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + * and the values of `input` are replicated `multiples[i]` times along the 'i'th + * dimension. For example, tiling `[a b c d]` by `[2]` produces + * `[a b c d a b c d]`. + * ``` + * + * a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * b = tf.constant([1,2], tf.int32) + * tf.tile(a, b) + * + * c = tf.constant([2,1], tf.int32) + * tf.tile(a, c) + * + * d = tf.constant([2,2], tf.int32) + * tf.tile(a, d) + * + * ``` + * + * @param data type for `output` output + * @param input 1-D or higher. + * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @param data type for `Tile` output and operands + * @return a new instance of Tile + * @see org.tensorflow.op.Ops.tile + */ + public fun tile(input: Operand, multiples: Operand): Tile = + java.tile( + input, + multiples + ) + + /** + * Provides the time since epoch in seconds. + * Returns the timestamp as a `float64` for seconds since the Unix epoch. + * + * Note: the timestamp is computed when the op is executed, not when it is added + * to the graph. 
+ * + * @return a new instance of Timestamp + * @see org.tensorflow.op.Ops.timestamp + */ + public fun timestamp(): Timestamp = java.timestamp( + + ) + + /** + * Returns the TopK unique values in the array in sorted order. + * The running time is proportional to the product of K and the input + * size. Sorting the whole array is more efficient for sufficiently large + * values of K. The median-of-medians algorithm is probably faster, but + * difficult to implement efficiently in XLA. If there are fewer than K + * unique numbers (not NANs), the results are padded with negative + * infinity. NaNs are never returned. Subnormal numbers are flushed to + * zero. If an element appears at multiple indices, the highest index is + * returned. If a TopK element never appears in the input due to padding + * values, the indices are padded with negative one. If a padding value + * appears in the input and padding is needed, the highest index of the + * padding value will be returned. The semantics are not the same as + * kth_order_statistic. + * + * @param input The input value + * @param k The value of the k attribute + * @return a new instance of TopKUnique + * @see org.tensorflow.op.Ops.topKUnique + */ + public fun topKUnique(input: Operand, k: Long): TopKUnique = java.topKUnique( + input, + k + ) + + /** + * Returns the TopK values in the array in sorted order. + * This is a combination of MakeUnique and TopKUnique. The returned top-K will + * have its lower bits replaced by iota, thus it will be close to the original + * value but not exactly the same. The running time is proportional to the product + * of K and the input size. NaNs are never returned. Subnormal numbers are flushed + * to zero. 
+ * + * @param input The input value + * @param k The value of the k attribute + * @return a new instance of TopKWithUnique + * @see org.tensorflow.op.Ops.topKWithUnique + */ + public fun topKWithUnique(input: Operand, k: Long): TopKWithUnique = + java.topKWithUnique( + input, + k + ) + + /** + * Reverses the operation of Batch for a single output Tensor. + * An instance of Unbatch either receives an empty batched_tensor, in which case it + * asynchronously waits until the values become available from a concurrently + * running instance of Unbatch with the same container and shared_name, or receives + * a non-empty batched_tensor in which case it finalizes all other concurrently + * running instances and outputs its own element from the batch. + * + * batched_tensor: The possibly transformed output of Batch. The size of the first + * dimension should remain unchanged by the transformations for the operation to + * work. + * batch_index: The matching batch_index obtained from Batch. + * id: The id scalar emitted by Batch. + * unbatched_tensor: The Tensor corresponding to this execution. + * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + * batched input tensor associated with a given invocation of the op. + * container: Container to control resource sharing. + * shared_name: Instances of Unbatch with the same container and shared_name are + * assumed to possibly belong to the same batch. If left empty, the op name will + * be used as the shared name. + * + * @param data type for `unbatched_tensor` output + * @param batchedTensor The batchedTensor value + * @param batchIndex The batchIndex value + * @param id The id value + * @param timeoutMicros The value of the timeoutMicros attribute + * @param options carries optional attribute values + * @param data type for `Unbatch` output and operands + * @return a new instance of Unbatch + * @see org.tensorflow.op.Ops.unbatch + * @param container Sets the container option. 
+ * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun unbatch( + batchedTensor: Operand, + batchIndex: Operand, + id: Operand, + timeoutMicros: Long, + container: String? = null, + sharedName: String? = null + ): Unbatch = java.unbatch( + batchedTensor, + batchIndex, + id, + timeoutMicros, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } + ).toTypedArray() + ) + + /** + * Gradient of Unbatch. + * Acts like Batch but using the given batch_index index of batching things as they + * become available. This ensures that the gradients are propagated back in the + * same session which did the forward pass. + * + * original_input: The input to the Unbatch operation this is the gradient of. + * batch_index: The batch_index given to the Unbatch operation this is the gradient + * of. + * grad: The downstream gradient. + * id: The id scalar emitted by Batch. + * batched_grad: The return value, either an empty tensor or the batched gradient. + * container: Container to control resource sharing. + * shared_name: Instances of UnbatchGrad with the same container and shared_name + * are assumed to possibly belong to the same batch. If left empty, the op name + * will be used as the shared name. + * + * @param data type for `batched_grad` output + * @param originalInput The originalInput value + * @param batchIndex The batchIndex value + * @param grad The grad value + * @param id The id value + * @param options carries optional attribute values + * @param data type for `UnbatchGrad` output and operands + * @return a new instance of UnbatchGrad + * @see org.tensorflow.op.Ops.unbatchGrad + * @param container Sets the container option. 
+ * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun unbatchGrad( + originalInput: Operand, + batchIndex: Operand, + grad: Operand, + id: Operand, + container: String? = null, + sharedName: String? = null + ): UnbatchGrad = java.unbatchGrad( + originalInput, + batchIndex, + grad, + id, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + ).toTypedArray() + ) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. 
+ * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param data type for `UniqueV2` output and operands + * @return a new instance of Unique, with default output types + * @see org.tensorflow.op.Ops.unique + */ + public fun unique(x: Operand, axis: Operand): Unique = + java.unique( + x, + axis + ) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. 
+ * @param outIdx The value of the outIdx attribute + * @param data type for `UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands + * @return a new instance of Unique + * @see org.tensorflow.op.Ops.unique + */ + public fun unique( + x: Operand, + axis: Operand, + outIdx: Class + ): Unique = java.unique( + x, + axis, + outIdx + ) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 0`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 1`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). 
The axis of the Tensor to + * find the unique elements. + * @param data type for `UniqueWithCountsV2` output and operands + * @return a new instance of UniqueWithCounts, with default output types + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ + public fun uniqueWithCounts(x: Operand, axis: Operand): + UniqueWithCounts = java.uniqueWithCounts( + x, + axis + ) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 0`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 1`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). 
The axis of the Tensor to + * find the unique elements. + * @param outIdx The value of the outIdx attribute + * @param data type for `UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands + * @return a new instance of UniqueWithCounts + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ + public fun uniqueWithCounts( + x: Operand, + axis: Operand, + outIdx: Class + ): UniqueWithCounts = java.uniqueWithCounts( + x, + axis, + outIdx + ) + + /** + * Converts an array of flat indices into a tuple of coordinate arrays. + * Example: + * ``` + * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + * # 'dims' represent a hypothetical (3, 3) tensor of indices: + * # [[0, 1, *2*], + * # [3, 4, *5*], + * # [6, *7*, 8]] + * # For each entry from 'indices', this operation returns + * # its coordinates (marked with '*'), such as + * # 2 ==> (0, 2) + * # 5 ==> (1, 2) + * # 7 ==> (2, 1) + * y ==> [[0, 1, 2], [2, 2, 1]] + * + * ``` + * + * `@`compatibility(numpy) + * + * Equivalent to np.unravel_index + * + * `@`end_compatibility + * + * @param data type for `output` output + * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the + * flattened version of an array of dimensions dims. + * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling + * indices. + * @param data type for `UnravelIndex` output and operands + * @return a new instance of UnravelIndex + * @see org.tensorflow.op.Ops.unravelIndex + */ + public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = + java.unravelIndex( + indices, + dims + ) + + /** + * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. + * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. 
+ * For example, given a tensor of shape `(A, B, C, D)`; + * + * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + * and each tensor in `output` will have shape `(B, C, D)`. (Note that the + * dimension unpacked along is gone, unlike `split`). + * + * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + * and each tensor in `output` will have shape `(A, C, D)`. + * Etc. + * + * This is the opposite of `pack`. + * + * @param data type for `output` output + * @param value 1-D or higher, with `axis` dimension size equal to `num`. + * @param num The value of the num attribute + * @param options carries optional attribute values + * @param data type for `Unpack` output and operands + * @return a new instance of Unstack + * @see org.tensorflow.op.Ops.unstack + * @param axis Sets the axis option. + * + * @param axis Dimension along which to unpack. Negative values wrap around, so the + * valid range is `[-R, R)`. + * @return this Options instance. + */ + public fun unstack( + value: Operand, + num: Long, + axis: Long? = null + ): Unstack = java.unstack( + value, + num, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } + ).toTypedArray() + ) + + /** + * Op is similar to a lightweight Dequeue. + * The basic functionality is similar to dequeue with many fewer + * capabilities and options. This Op is optimized for performance. + * + * @param dtypes The value of the dtypes attribute + * @param options carries optional attribute values + * @return a new instance of Unstage + * @see org.tensorflow.op.Ops.unstage + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. 
+ * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun unstage( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Unstage = java.unstage( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } + ).toTypedArray() + ) + + /** + * Creates a handle to a Variable resource. + * + * @param dtype the type of this variable. Must agree with the dtypes + * of all ops using this variable. + * @param shape The (possibly partially specified) shape of this variable. + * @param options carries optional attribute values + * @param data type for `VarHandleOp` output and operands + * @return a new instance of VarHandleOp + * @see org.tensorflow.op.Ops.varHandleOp + * @param container Sets the container option. + * + * @param container the container this variable is placed in. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the name by which this variable is referred to. + * @return this Options instance. + * @param allowedDevices Sets the allowedDevices option. + * + * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. Set + * when the + * output ResourceHandle represents a per-replica/partitioned resource variable. + * @return this Options instance. + */ + public fun varHandleOp( + dtype: Class, + shape: Shape, + container: String? = null, + sharedName: String? = null, + allowedDevices: List? 
= null + ): VarHandleOp = java.varHandleOp( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + ).toTypedArray() + ) + + /** + * Checks whether a resource handle-based variable has been initialized. + * + * @param resource the input resource handle. + * @return a new instance of VarIsInitializedOp + * @see org.tensorflow.op.Ops.varIsInitializedOp + */ + public fun varIsInitializedOp(resource: Operand): VarIsInitializedOp = + java.varIsInitializedOp( + resource + ) + + /** + * Factory method to create a new Variable with its initializer. Both the creation and + * assignment + * are done in the init scope. + * + * + * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op does not + * work in an EagerSession. + * + * @param init The op to use to initialise this variable. + * @param options carries optional attributes values + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. + * + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun variable( + `init`: Operand, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + init, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + /** + * Holds state in the form of a tensor that persists across steps. 
+ * Outputs a ref to the tensor state so it may be read or modified. + * TODO(zhifengc/mrry): Adds a pointer to a more detail document + * about sharing states in tensorflow. + * + * @param data type for `ref` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attribute values + * @param data type for `VariableV2` output and operands + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. + * + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + public fun variable( + shape: Shape, + dtype: Class, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + shape, + dtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + /** + * Returns the shape of the variable pointed to by `resource`. + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of VariableShape, with default output types + * @see org.tensorflow.op.Ops.variableShape + */ + public fun variableShape(input: Operand): VariableShape = + java.variableShape( + input + ) + + /** + * Returns the shape of the variable pointed to by `resource`. 
+ * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `VariableShape` output and operands + * @return a new instance of VariableShape + * @see org.tensorflow.op.Ops.variableShape + */ + public fun variableShape(input: Operand, outType: Class): + VariableShape = java.variableShape( + input, + outType + ) + + /** + * Returns locations of nonzero / true values in a tensor. + * This operation returns the coordinates of true elements in `condition`. The + * coordinates are returned in a 2-D tensor where the first dimension (rows) + * represents the number of true elements, and the second dimension (columns) + * represents the coordinates of the true elements. Keep in mind, the shape of + * the output tensor can vary depending on how many true values there are in + * `condition`. Indices are output in row-major order. + * + * For example: + * ``` + * # 'input' tensor is [[True, False] + * # [True, False]] + * # 'input' has two true values, so output has two coordinates. + * # 'input' has rank of 2, so coordinates have two indices. + * where(input) ==> [[0, 0], + * [1, 0]] + * + * # `condition` tensor is [[[True, False] + * # [True, False]] + * # [[False, True] + * # [False, True]] + * # [[False, False] + * # [False, True]]] + * # 'input' has 5 true values, so output has 5 coordinates. + * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5, 0.0] + * # [-0.5, 0.0]] + * # [[0.0, 0.25] + * # [0.0, 0.75]] + * # [[0.0, 0.0] + * # [0.0, 0.01]]] + * # 'input' has 5 nonzero values, so output has 5 coordinates. 
+ * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.5j, 0.0 + 0.0j]] + * # [[0.0 + 0.0j, 0.25 + 1.5j] + * # [0.0 + 0.0j, 0.75 + 0.0j]] + * # [[0.0 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.0j, 0.01 + 0.0j]]] + * # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. + * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * ``` + * + * @param condition The condition value + * @return a new instance of Where + * @see org.tensorflow.op.Ops.where + */ + public fun `where`(condition: Operand): Where = java.where( + condition + ) + + /** + * output = input; While (Cond(output)) { output = Body(output) } + * + * + * Selects between [StatefulWhile] and [StatelessWhile] based on the statefulness of the + * function arguments. + * + * @param input A list of input tensors whose types are T. + * @param cond ` + * A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * + * ` + * @param body ` + * A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified + * by T. + * + * ` + * @param options carries optional attribute values + * @return a new instance of While + * @see org.tensorflow.op.Ops.whileOp + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. 
+ * @param parallelIterations Sets the parallelIterations option. + * + * @param parallelIterations the parallelIterations option + * @return this Options instance. + */ + public fun whileOp( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction, + outputShapes: List? = null, + parallelIterations: Long? = null + ): While = java.whileOp( + input, + cond, + body, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) }, + parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) } + ).toTypedArray() + ) + + /** + * Creates a zeroed tensor given its type and shape. + * + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor datatype + * @return a constant tensor initialized with zeros + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * zeros. + * @see org.tensorflow.op.Ops.zeros + */ + public fun zeros(dims: Operand, type: Class): Zeros = + java.zeros( + dims, + type + ) + + /** + * Returns a tensor of zeros with the same shape and type as x. + * + * @param data type for `y` output + * @param x a tensor of type T. + * @param data type for `ZerosLike` output and operands + * @return a new instance of ZerosLike + * @see org.tensorflow.op.Ops.zerosLike + */ + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + x + ) + + /** + * Bitcasts a tensor from one type to another without copying data. + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. 
+ * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + * gives module error. + * For example, + * + * Example 1: + * ``` + * + * a = [1., 2., 3.] + * equality_bitcast = tf.bitcast(a, tf.complex128) + * Traceback (most recent call last): + * ... + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * equality_cast = tf.cast(a, tf.complex128) + * print(equality_cast) + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * ``` + * + * Example 2: + * ``` + * + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * ``` + * + * Example 3: + * ``` + * + * x = [1., 2., 3.] + * y = [0., 2., 3.] + * equality= tf.equal(x,y) + * equality_cast = tf.cast(equality,tf.float32) + * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * print(equality) + * tf.Tensor([False True True], shape=(3,), dtype=bool) + * print(equality_cast) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + * print(equality_bitcast) + * tf.Tensor( + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * ``` + * + * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different + * endian orderings will give different results. + * + * @param data type for `output` output + * @param input The input value + * @param type The value of the type attribute + * @param data type for `Bitcast` output and operands + * @return a new instance of Bitcast + * @see org.tensorflow.op.Ops.bitcast + */ + @JvmName("bitcastReified") + public inline fun bitcast(input: Operand): Bitcast = + bitcast(input, U::class.java) + + /** + * Creates a scalar of `type`, with the value of `number`. `number` may be + * truncated if it does not fit in the target type. + * + * @param type the type of tensor to create. Must be concrete (i.e. 
not + * [org.tensorflow.types.family.TFloating]) + * @param number the value of the tensor + * @return a constant of the passed type + * @throws IllegalArgumentException if the type is abstract (i.e. + * [org.tensorflow.types.family.TFloating]) or unknown. + * @see org.tensorflow.op.Ops.constant + */ + @JvmName("constantReified") + public inline fun constant(number: Number): Constant = + constant(T::class.java, number) + + /** + * Create a constant with data from the given buffer. + * + * @param the tensor type + * @param type the tensor type class + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @see org.tensorflow.op.Ops.constant + */ + @JvmName("constantReified") + public inline fun constantTyped(shape: Shape, `data`: ByteDataBuffer): + Constant = constant(T::class.java, shape, data) + + /** + * Creates a tensor with the given shape. + * + * This operation creates a tensor of `shape` and `dtype`. + * + * @param data type for `output` output + * @param shape 1-D. Represents the shape of the output tensor. + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `Empty` output and operands + * @return a new instance of Empty + * @see org.tensorflow.op.Ops.empty + * @param init Sets the init option. + * + * @param init If True, initialize the returned tensor with the default value of dtype. + * Otherwise, the implementation is free not to initializethe tensor's content. + * @return this Options instance. + */ + @JvmName("emptyReified") + public inline fun empty(shape: Operand, `init`: Boolean? = null): + Empty = empty(shape, T::class.java, init) + + /** + * Creates and returns an empty tensor list. + * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. 
+ * + * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param elementShape The elementShape value + * @param maxNumElements The maxNumElements value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `EmptyTensorList` output and operands + * @return a new instance of EmptyTensorList + * @see org.tensorflow.op.Ops.emptyTensorList + */ + @JvmName("emptyTensorListReified") + public inline fun emptyTensorList(elementShape: Operand, + maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, + maxNumElements, U::class.java) + + /** + * Get the value of the tensor specified by its handle. + * + * @param data type for `value` output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @param data type for `GetSessionTensor` output and operands + * @return a new instance of GetSessionTensor + * @see org.tensorflow.op.Ops.getSessionTensor + */ + @JvmName("getSessionTensorReified") + public inline fun getSessionTensor(handle: Operand): + GetSessionTensor = getSessionTensor(handle, T::class.java) + + /** + * Creates a non-initialized hash table. + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands + * @return a new instance of HashTable + * @see org.tensorflow.op.Ops.hashTable + * + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. 
+ * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + * @return this Options instance. + */ + @JvmName("hashTableReified") + public inline fun hashTable( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): HashTable = hashTable(T::class.java, U::class.java, container, sharedName, + useNodeNameSharing) + + /** + * Return histogram of values. + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * + * ``` + * + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. 
+ * @param dtype The value of the dtype attribute + * @param data type for `HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands + * @return a new instance of HistogramFixedWidth + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ + @JvmName("histogramFixedWidthReified") + public inline fun histogramFixedWidthTyped( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = histogramFixedWidth(values, valueRange, nbins, U::class.java) + + /** + * Returns immutable tensor from memory region. + * The current implementation memmaps the tensor from a file. + * + * @param data type for `tensor` output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. + * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @param data type for `ImmutableConst` output and operands + * @return a new instance of ImmutableConst + * @see org.tensorflow.op.Ops.immutableConst + */ + @JvmName("immutableConstReified") + public inline fun immutableConst(shape: Shape, memoryRegionName: String): + ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) + + /** + * Outputs all keys and values in the table. + * + * @param data type for `keys` output + * @param data type for `values` output + * @param tableHandle Handle to the table. 
+ * @param Tkeys The value of the Tkeys attribute + * @param Tvalues The value of the Tvalues attribute + * @param data type for `LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands + * @return a new instance of LookupTableExport + * @see org.tensorflow.op.Ops.lookupTableExport + */ + @JvmName("lookupTableExportReified") + public inline fun + lookupTableExport(tableHandle: Operand): LookupTableExport = + lookupTableExport(tableHandle, T::class.java, U::class.java) + + /** + * Creates an empty hash table that uses tensors as the backing store. + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey The deletedKey value + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands + * @return a new instance of MutableDenseHashTable + * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. 
+ * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape The shape of each value. + * @return this Options instance. + * @param initialNumBuckets Sets the initialNumBuckets option. + * + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @return this Options instance. + * @param maxLoadFactor Sets the maxLoadFactor option. + * + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + * @return this Options instance. + */ + @JvmName("mutableDenseHashTableReified") + public inline fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null, + initialNumBuckets: Long? = null, + maxLoadFactor: Float? = null + ): MutableDenseHashTable = mutableDenseHashTable(emptyKey, deletedKey, U::class.java, + container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor) + + /** + * Creates an empty hash table. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands + * @return a new instance of MutableHashTable + * @see org.tensorflow.op.Ops.mutableHashTable + * + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. 
+ * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + * @return this Options instance. + */ + @JvmName("mutableHashTableReified") + public inline fun mutableHashTable( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): MutableHashTable = mutableHashTable(T::class.java, U::class.java, container, + sharedName, useNodeNameSharing) + + /** + * Creates an empty hash table. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @return a new instance of MutableHashTableOfTensors + * @see org.tensorflow.op.Ops.mutableHashTableOfTensors + * + * @param container Sets the container option. + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. 
+ * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. + */ + @JvmName("mutableHashTableOfTensorsReified") + public inline fun mutableHashTableOfTensors( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null + ): MutableHashTableOfTensors = mutableHashTableOfTensors(T::class.java, U::class.java, + container, sharedName, useNodeNameSharing, valueShape) + + /** + * Creates a one valued tensor given its type and shape. + * + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor type class. Can not be TString. + * @return a constant tensor initialized with ones + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. + * @see org.tensorflow.op.Ops.ones + */ + @JvmName("onesReified") + public inline fun ones(dims: Operand): Ones = ones(dims, + T::class.java) + + /** + * A placeholder op for a value that will be fed into the computation. + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. + * + * @param data type for `output` output + * @param dtype The type of elements in the tensor. + * @param options carries optional attribute values + * @param data type for `Placeholder` output and operands + * @return a new instance of Placeholder + * @see org.tensorflow.op.Ops.placeholder + * + * @param shape Sets the shape option. + * + * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the + * shape is unconstrained. + * @return this Options instance. + */ + @JvmName("placeholderReified") + public inline fun placeholder(shape: Shape? 
= null): Placeholder = + placeholder(T::class.java, shape) + + /** + * Reads the value of a variable. + * The tensor returned by this operation is immutable. + * + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. + * + * @param data type for `value` output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. + * @param data type for `ReadVariableOp` output and operands + * @return a new instance of ReadVariableOp + * @see org.tensorflow.op.Ops.readVariableOp + */ + @JvmName("readVariableOpReified") + public inline fun readVariableOp(resource: Operand): + ReadVariableOp = readVariableOp(resource, T::class.java) + + /** + * Increments variable pointed to by 'resource' until it reaches 'limit'. + * + * @param data type for `output` output + * @param resource Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T The value of the T attribute + * @param data type for `ResourceCountUpTo` output and operands + * @return a new instance of ResourceCountUpTo + * @see org.tensorflow.op.Ops.resourceCountUpTo + */ + @JvmName("resourceCountUpToReified") + public inline fun resourceCountUpTo(resource: Operand, + limit: Long): ResourceCountUpTo = resourceCountUpTo(resource, limit, + T::class.java) + + /** + * Gather slices from the variable pointed to by `resource` according to `indices`. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] + * + * # Vector indices + * output[i, :, ..., :] = params[indices[i], :, ... 
:] + * + * # Higher rank indices + * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + * + * ``` + * + * @param data type for `output` output + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `ResourceGather` output and operands + * @return a new instance of ResourceGather + * @see org.tensorflow.op.Ops.resourceGather + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. + */ + @JvmName("resourceGatherReified") + public inline fun resourceGather( + resource: Operand, + indices: Operand, + batchDims: Long? = null, + validateIndices: Boolean? = null + ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, + validateIndices) + + /** + * The ResourceGatherNd operation + * + * @param data type for `output` output + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute + * @param data type for `ResourceGatherNd` output and operands + * @return a new instance of ResourceGatherNd + * @see org.tensorflow.op.Ops.resourceGatherNd + */ + @JvmName("resourceGatherNdReified") + public inline fun resourceGatherNd(resource: Operand, + indices: Operand): ResourceGatherNd = resourceGatherNd(resource, + indices, U::class.java) + + /** + * Computes the difference between two lists of numbers or strings. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). 
This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * + * ``` + * + * @param data type for `out` output + * @param data type for `idx` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param outIdx The value of the outIdx attribute + * @param data type for `ListDiff` output and operands + * @param data type for `ListDiff` output and operands + * @return a new instance of SetDiff1d + * @see org.tensorflow.op.Ops.setDiff1d + */ + @JvmName("setDiff1dReified") + public inline fun setDiff1dTyped(x: Operand, y: Operand): + SetDiff1d = setDiff1d(x, y, U::class.java) + + /** + * Returns the shape of a tensor. + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `Shape` output and operands + * @return a new instance of Shape + * @see org.tensorflow.op.Ops.shape + */ + @JvmName("shapeReified") + public inline fun shapeTyped(input: Operand): + org.tensorflow.op.core.Shape = shape(input, U::class.java) + + /** + * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
+ * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `ShapeN` output and operands + * @return a new instance of ShapeN + * @see org.tensorflow.op.Ops.shapeN + */ + @JvmName("shapeNReified") + public inline fun shapeNTyped(input: Iterable>): + ShapeN = shapeN(input, U::class.java) + + /** + * Returns the size of a tensor. + * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `Size` output and operands + * @return a new instance of Size + * @see org.tensorflow.op.Ops.size + */ + @JvmName("sizeReified") + public inline fun sizeTyped(input: Operand): Size = + size(input, U::class.java) + + /** + * Returns a tensor that may be mutated, but only persists within a single step. + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. + * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param data type for `ref` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. 
+ * @param options carries optional attribute values + * @param data type for `TemporaryVariable` output and operands + * @return a new instance of TemporaryVariable + * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Sets the varName option. + * + * @param varName Overrides the name used for the temporary variable resource. Default + * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + * @return this Options instance. + */ + @JvmName("temporaryVariableReified") + public inline fun temporaryVariable(shape: Shape, varName: String? = null): + TemporaryVariable = temporaryVariable(shape, T::class.java, varName) + + /** + * An array of Tensors of given size. + * Write data via Write and read via Read or Pack. + * + * @param sizeOutput The size of the array. + * @param dtype The type of the elements on the tensor_array. + * @param options carries optional attribute values + * @param data type for `TensorArrayV3` output and operands + * @return a new instance of TensorArray + * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape Sets the elementShape option. + * + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. + * @param dynamicSize Sets the dynamicSize option. + * + * @param dynamicSize A boolean that determines whether writes to the TensorArray + * are allowed to grow the size. By default, this is not allowed. + * @return this Options instance. + * @param clearAfterRead Sets the clearAfterRead option. + * + * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared + * after being read. This disables multiple read semantics but allows early + * release of memory. + * @return this Options instance. + * @param identicalElementShapes Sets the identicalElementShapes option. 
+ * + * @param identicalElementShapes If true (default is false), then all + * elements in the TensorArray will be expected to have identical shapes. + * This allows certain behaviors, like dynamically checking for + * consistent shapes on write, and being able to fill in properly + * shaped zero tensors on stack -- even if the element_shape attribute + * is not fully defined. + * @return this Options instance. + * @param tensorArrayName Sets the tensorArrayName option. + * + * @param tensorArrayName Overrides the name used for the temporary tensor_array + * resource. Default value is the name of the 'TensorArray' op (which + * is guaranteed unique). + * @return this Options instance. + */ + @JvmName("tensorArrayReified") + public inline fun tensorArray( + sizeOutput: Operand, + elementShape: Shape? = null, + dynamicSize: Boolean? = null, + clearAfterRead: Boolean? = null, + identicalElementShapes: Boolean? = null, + tensorArrayName: String? = null + ): TensorArray = tensorArray(sizeOutput, T::class.java, elementShape, dynamicSize, + clearAfterRead, identicalElementShapes, tensorArrayName) + + /** + * Concat the elements from the TensorArray into value `value`. + * Takes `T` elements of shapes + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * + * ``` + * + * and concatenates them into a Tensor of shape: + * + * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` + * + * All elements must have the same shape (excepting the first dimension). + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. 
+ * @param options carries optional attribute values + * @param data type for `TensorArrayConcatV3` output and operands + * @return a new instance of TensorArrayConcat + * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 Sets the elementShapeExcept0 option. + * + * @param elementShapeExcept0 The expected shape of an element, if known, + * excluding the first dimension. Used to validate the shapes of + * TensorArray elements. If this shape is not fully specified, concatenating + * zero-size TensorArrays is an error. + * @return this Options instance. + */ + @JvmName("tensorArrayConcatReified") + public inline fun tensorArrayConcat( + handle: Operand, + flowIn: Operand, + elementShapeExcept0: Shape? = null + ): TensorArrayConcat = tensorArrayConcat(handle, flowIn, T::class.java, + elementShapeExcept0) + + /** + * Gather specific elements from the TensorArray into output `value`. + * All elements selected by `indices` must have the same shape. + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attribute values + * @param data type for `TensorArrayGatherV3` output and operands + * @return a new instance of TensorArrayGather + * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape Sets the elementShape option. + * + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. + */ + @JvmName("tensorArrayGatherReified") + public inline fun tensorArrayGather( + handle: Operand, + indices: Operand, + flowIn: Operand, + elementShape: Shape? 
= null + ): TensorArrayGather = tensorArrayGather(handle, indices, flowIn, T::class.java, + elementShape) + + /** + * The TensorArrayPack operation + * + * @param data type for `value` output + * @param handle The handle value + * @param flowIn The flowIn value + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `TensorArrayPack` output and operands + * @return a new instance of TensorArrayPack + * @see org.tensorflow.op.Ops.tensorArrayPack + * @param elementShape Sets the elementShape option. + * + * @param elementShape the elementShape option + * @return this Options instance. + */ + @JvmName("tensorArrayPackReified") + public inline fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + elementShape: Shape? = null + ): TensorArrayPack = tensorArrayPack(handle, flowIn, T::class.java, elementShape) + + /** + * Read an element from the TensorArray into output `value`. + * + * @param data type for `value` output + * @param handle The handle to a TensorArray. + * @param index The index value + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param data type for `TensorArrayReadV3` output and operands + * @return a new instance of TensorArrayRead + * @see org.tensorflow.op.Ops.tensorArrayRead + */ + @JvmName("tensorArrayReadReified") + public inline fun tensorArrayRead( + handle: Operand, + index: Operand, + flowIn: Operand + ): TensorArrayRead = tensorArrayRead(handle, index, flowIn, T::class.java) + + /** + * Concats all tensors in the list along the 0th dimension. + * Requires that all tensors have the same shape except the first dimension. + * + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. 
+ * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. + * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used + * for computing the gradient. + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param leadingDims The leadingDims value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListConcatV2` output and operands + * @return a new instance of TensorListConcat + * @see org.tensorflow.op.Ops.tensorListConcat + */ + @JvmName("tensorListConcatReified") + public inline fun tensorListConcat( + inputHandle: Operand, + elementShape: Operand, + leadingDims: Operand + ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, + U::class.java) + + /** + * The TensorListConcatLists operation + * + * @param inputA The inputA value + * @param inputB The inputB value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListConcatLists` output and operands + * @return a new instance of TensorListConcatLists + * @see org.tensorflow.op.Ops.tensorListConcatLists + */ + @JvmName("tensorListConcatListsReified") + public inline fun tensorListConcatLists(inputA: Operand, + inputB: Operand): TensorListConcatLists = tensorListConcatLists(inputA, + inputB, T::class.java) + + /** + * The shape of the elements of the given list, as a tensor. 
+ * input_handle: the list + * element_shape: the shape of elements of the list + * + * @param data type for `element_shape` output + * @param inputHandle The inputHandle value + * @param shapeType The value of the shapeType attribute + * @param data type for `TensorListElementShape` output and operands + * @return a new instance of TensorListElementShape + * @see org.tensorflow.op.Ops.tensorListElementShape + */ + @JvmName("tensorListElementShapeReified") + public inline fun tensorListElementShape(inputHandle: Operand): + TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) + + /** + * Creates a Tensor by indexing into the TensorList. + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. + * indices: The indices used to index into the list. + * values: The tensor. + * + * @param data type for `values` output + * @param inputHandle The inputHandle value + * @param indices The indices value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListGather` output and operands + * @return a new instance of TensorListGather + * @see org.tensorflow.op.Ops.tensorListGather + */ + @JvmName("tensorListGatherReified") + public inline fun tensorListGather( + inputHandle: Operand, + indices: Operand, + elementShape: Operand + ): TensorListGather = tensorListGather(inputHandle, indices, elementShape, T::class.java) + + /** + * The TensorListGetItem operation + * + * @param data type for `item` output + * @param inputHandle The inputHandle value + * @param index The index value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListGetItem` output and operands + * @return a new instance of TensorListGetItem + * @see org.tensorflow.op.Ops.tensorListGetItem 
+ */ + @JvmName("tensorListGetItemReified") + public inline fun tensorListGetItem( + inputHandle: Operand, + index: Operand, + elementShape: Operand + ): TensorListGetItem = tensorListGetItem(inputHandle, index, elementShape, T::class.java) + + /** + * Returns the last element of the input list as well as a list with all but that element. + * Fails if the list is empty. + * + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListPopBack` output and operands + * @return a new instance of TensorListPopBack + * @see org.tensorflow.op.Ops.tensorListPopBack + */ + @JvmName("tensorListPopBackReified") + public inline fun tensorListPopBack(inputHandle: Operand, + elementShape: Operand): TensorListPopBack = tensorListPopBack(inputHandle, + elementShape, T::class.java) + + /** + * List of the given size with empty elements. + * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. + * + * @param elementShape The elementShape value + * @param numElements The numElements value + * @param elementDtype The value of the elementDtype attribute + * @param data type for `TensorListReserve` output and operands + * @return a new instance of TensorListReserve + * @see org.tensorflow.op.Ops.tensorListReserve + */ + @JvmName("tensorListReserveReified") + public inline fun tensorListReserve(elementShape: Operand, + numElements: Operand): TensorListReserve = tensorListReserve(elementShape, + numElements, U::class.java) + + /** + * Stacks all tensors in the list. 
+ * Requires that all tensors have the same shape. + * + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. + * + * @param data type for `tensor` output + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute + * @param options carries optional attribute values + * @param data type for `TensorListStack` output and operands + * @return a new instance of TensorListStack + * @see org.tensorflow.op.Ops.tensorListStack + * @param numElements Sets the numElements option. + * + * @param numElements the numElements option + * @return this Options instance. + */ + @JvmName("tensorListStackReified") + public inline fun tensorListStack( + inputHandle: Operand, + elementShape: Operand, + numElements: Long? = null + ): TensorListStack = tensorListStack(inputHandle, elementShape, T::class.java, + numElements) + + /** + * Returns a tensor map with item from given key erased. + * input_handle: the original map + * output_handle: the map with value from given key removed + * key: the key of the value to be erased + * + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute + * @param data type for `TensorMapErase` output and operands + * @return a new instance of TensorMapErase + * @see org.tensorflow.op.Ops.tensorMapErase + */ + @JvmName("tensorMapEraseReified") + public inline fun tensorMapErase(inputHandle: Operand, + key: Operand): TensorMapErase = tensorMapErase(inputHandle, key, + U::class.java) + + /** + * Returns the value from a given key in a tensor map. 
+ * input_handle: the input map + * key: the key to be looked up + * value: the value found from the given key + * + * @param data type for `value` output + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute + * @param data type for `TensorMapLookup` output and operands + * @return a new instance of TensorMapLookup + * @see org.tensorflow.op.Ops.tensorMapLookup + */ + @JvmName("tensorMapLookupReified") + public inline fun tensorMapLookup(inputHandle: Operand, + key: Operand): TensorMapLookup = tensorMapLookup(inputHandle, key, + U::class.java) + + /** + * Returns a Tensor stack of all keys in a tensor map. + * input_handle: the input map + * keys: the returned Tensor of all keys in the map + * + * @param data type for `keys` output + * @param inputHandle The inputHandle value + * @param keyDtype The value of the keyDtype attribute + * @param data type for `TensorMapStackKeys` output and operands + * @return a new instance of TensorMapStackKeys + * @see org.tensorflow.op.Ops.tensorMapStackKeys + */ + @JvmName("tensorMapStackKeysReified") + public inline fun tensorMapStackKeys(inputHandle: Operand): + TensorMapStackKeys = tensorMapStackKeys(inputHandle, T::class.java) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx The value of the outIdx attribute + * @param data type for `UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands + * @return a new instance of Unique + * @see org.tensorflow.op.Ops.unique + */ + @JvmName("uniqueReified") + public inline fun uniqueTyped(x: Operand, axis: Operand): Unique = unique(x, axis, V::class.java) + + /** + * Finds unique elements along an axis of a tensor. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 0`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * + * ``` + * + * For a `2-D` tensor `x` with `axis = 1`: + * ``` + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx The value of the outIdx attribute + * @param data type for `UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands + * @return a new instance of UniqueWithCounts + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ + @JvmName("uniqueWithCountsReified") + public inline fun uniqueWithCountsTyped(x: Operand, + axis: Operand): UniqueWithCounts = uniqueWithCounts(x, axis, + V::class.java) + + /** + * Creates a handle to a Variable resource. + * + * @param dtype the type of this variable. Must agree with the dtypes + * of all ops using this variable. + * @param shape The (possibly partially specified) shape of this variable. 
+ * @param options carries optional attribute values + * @param data type for `VarHandleOp` output and operands + * @return a new instance of VarHandleOp + * @see org.tensorflow.op.Ops.varHandleOp + * @param container Sets the container option. + * + * @param container the container this variable is placed in. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the name by which this variable is referred to. + * @return this Options instance. + * @param allowedDevices Sets the allowedDevices option. + * + * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. Set + * when the + * output ResourceHandle represents a per-replica/partitioned resource variable. + * @return this Options instance. + */ + @JvmName("varHandleOpReified") + public inline fun varHandleOp( + shape: Shape, + container: String? = null, + sharedName: String? = null, + allowedDevices: List? = null + ): VarHandleOp = varHandleOp(T::class.java, shape, container, sharedName, allowedDevices) + + /** + * Holds state in the form of a tensor that persists across steps. + * Outputs a ref to the tensor state so it may be read or modified. + * TODO(zhifengc/mrry): Adds a pointer to a more detail document + * about sharing states in tensorflow. + * + * @param data type for `ref` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attribute values + * @param data type for `VariableV2` output and operands + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. + * + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. 
+ * + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + */ + @JvmName("variableReified") + public inline fun variable( + shape: Shape, + container: String? = null, + sharedName: String? = null + ): Variable = variable(shape, T::class.java, container, sharedName) + + /** + * Returns the shape of the variable pointed to by `resource`. + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param outType The value of the outType attribute + * @param data type for `VariableShape` output and operands + * @return a new instance of VariableShape + * @see org.tensorflow.op.Ops.variableShape + */ + @JvmName("variableShapeReified") + public inline fun variableShapeTyped(input: Operand): + VariableShape = variableShape(input, T::class.java) + + /** + * Creates a zeroed tensor given its type and shape. + * + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor datatype + * @return a constant tensor initialized with zeros + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * zeros. 
+ * @see org.tensorflow.op.Ops.zeros + */ + @JvmName("zerosReified") + public inline fun zeros(dims: Operand): Zeros = + zeros(dims, T::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt new file mode 100644 index 00000000000..e62b9d64e55 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -0,0 +1,2211 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.linalg.BandPart +import org.tensorflow.op.linalg.BatchCholesky +import org.tensorflow.op.linalg.BatchCholeskyGrad +import org.tensorflow.op.linalg.BatchMatrixBandPart +import org.tensorflow.op.linalg.BatchMatrixDeterminant +import org.tensorflow.op.linalg.BatchMatrixDiag +import org.tensorflow.op.linalg.BatchMatrixDiagPart +import org.tensorflow.op.linalg.BatchMatrixInverse +import org.tensorflow.op.linalg.BatchMatrixSetDiag +import org.tensorflow.op.linalg.BatchMatrixSolve +import org.tensorflow.op.linalg.BatchMatrixSolveLs +import org.tensorflow.op.linalg.BatchMatrixTriangularSolve +import org.tensorflow.op.linalg.BatchSelfAdjointEig +import org.tensorflow.op.linalg.BatchSvd +import org.tensorflow.op.linalg.Cholesky +import org.tensorflow.op.linalg.CholeskyGrad +import org.tensorflow.op.linalg.ConjugateTranspose +import org.tensorflow.op.linalg.Cross +import org.tensorflow.op.linalg.Det +import org.tensorflow.op.linalg.Eig +import org.tensorflow.op.linalg.Einsum +import org.tensorflow.op.linalg.EuclideanNorm +import org.tensorflow.op.linalg.Inv +import org.tensorflow.op.linalg.LoadAndRemapMatrix +import org.tensorflow.op.linalg.LogMatrixDeterminant +import org.tensorflow.op.linalg.Lu +import org.tensorflow.op.linalg.MatMul +import org.tensorflow.op.linalg.MatrixDiag +import org.tensorflow.op.linalg.MatrixDiagPart +import org.tensorflow.op.linalg.MatrixDiagPartV3 +import org.tensorflow.op.linalg.MatrixDiagV3 +import org.tensorflow.op.linalg.MatrixSetDiag +import org.tensorflow.op.linalg.MatrixSolveLs +import org.tensorflow.op.linalg.Qr +import org.tensorflow.op.linalg.QuantizedMatMul +import org.tensorflow.op.linalg.SelfAdjointEig +import org.tensorflow.op.linalg.Solve +import org.tensorflow.op.linalg.Sqrtm +import org.tensorflow.op.linalg.Svd 
+import org.tensorflow.op.linalg.TensorDiag +import org.tensorflow.op.linalg.TensorDiagPart +import org.tensorflow.op.linalg.Transpose +import org.tensorflow.op.linalg.TriangularSolve +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class LinalgOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.LinalgOps = ops.java.linalg + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Copy a tensor setting everything outside a central band in each innermost matrix to zero. + * The `band` part is computed as follows: + * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + * tensor with the same shape where + * + * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. + * + * The indicator function + * + * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= + * num_upper)`. + * + * For example: + * ``` + * # if 'input' is [[ 0, 1, 2, 3] + * # [-1, 0, 1, 2] + * # [-2, -1, 0, 1] + * # [-3, -2, -1, 0]], + * + * tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [ 0, -1, 0, 1] + * [ 0, 0, -1, 0]], + * + * tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + * [-1, 0, 1, 0] + * [-2, -1, 0, 1] + * [ 0, -2, -1, 0]] + * + * ``` + * + * Useful special cases: + * ``` + * tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. + * tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. + * tf.linalg.band_part(input, 0, 0) ==> Diagonal. 
+ * + * ``` + * + * @param data type for `band` output + * @param input Rank `k` tensor. + * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire + * lower triangle. + * @param numUpper 0-D tensor. Number of superdiagonals to keep. If negative, keep + * entire upper triangle. + * @param data type for `MatrixBandPart` output and operands + * @param data type for `MatrixBandPart` output and operands + * @return a new instance of BandPart + * @see org.tensorflow.op.LinalgOps.bandPart + */ + public fun bandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BandPart = java.bandPart( + input, + numLower, + numUpper + ) + + /** + * The BatchCholesky operation + * + * @param data type for `output` output + * @param input The input value + * @param data type for `BatchCholesky` output and operands + * @return a new instance of BatchCholesky + * @see org.tensorflow.op.LinalgOps.batchCholesky + */ + public fun batchCholesky(input: Operand): BatchCholesky = + java.batchCholesky( + input + ) + + /** + * The BatchCholeskyGrad operation + * + * @param data type for `output` output + * @param l The l value + * @param grad The grad value + * @param data type for `BatchCholeskyGrad` output and operands + * @return a new instance of BatchCholeskyGrad + * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad + */ + public fun batchCholeskyGrad(l: Operand, grad: Operand): + BatchCholeskyGrad = java.batchCholeskyGrad( + l, + grad + ) + + /** + * The BatchMatrixBandPart operation + * + * @param data type for `band` output + * @param input The input value + * @param numLower The numLower value + * @param numUpper The numUpper value + * @param data type for `BatchMatrixBandPart` output and operands + * @return a new instance of BatchMatrixBandPart + * @see org.tensorflow.op.LinalgOps.batchMatrixBandPart + */ + public fun batchMatrixBandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BatchMatrixBandPart = 
java.batchMatrixBandPart( + input, + numLower, + numUpper + ) + + /** + * The BatchMatrixDeterminant operation + * + * @param data type for `output` output + * @param input The input value + * @param data type for `BatchMatrixDeterminant` output and operands + * @return a new instance of BatchMatrixDeterminant + * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant + */ + public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = + java.batchMatrixDeterminant( + input + ) + + /** + * The BatchMatrixDiag operation + * + * @param data type for `output` output + * @param diagonal The diagonal value + * @param data type for `BatchMatrixDiag` output and operands + * @return a new instance of BatchMatrixDiag + * @see org.tensorflow.op.LinalgOps.batchMatrixDiag + */ + public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = + java.batchMatrixDiag( + diagonal + ) + + /** + * The BatchMatrixDiagPart operation + * + * @param data type for `diagonal` output + * @param input The input value + * @param data type for `BatchMatrixDiagPart` output and operands + * @return a new instance of BatchMatrixDiagPart + * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart + */ + public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = + java.batchMatrixDiagPart( + input + ) + + /** + * The BatchMatrixInverse operation + * + * @param data type for `output` output + * @param input The input value + * @param options carries optional attribute values + * @param data type for `BatchMatrixInverse` output and operands + * @return a new instance of BatchMatrixInverse + * @see org.tensorflow.op.LinalgOps.batchMatrixInverse + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public fun batchMatrixInverse(input: Operand, adjoint: Boolean? 
= null): + BatchMatrixInverse = java.batchMatrixInverse( + input, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + ).toTypedArray() + ) + + /** + * The BatchMatrixSetDiag operation + * + * @param data type for `output` output + * @param input The input value + * @param diagonal The diagonal value + * @param data type for `BatchMatrixSetDiag` output and operands + * @return a new instance of BatchMatrixSetDiag + * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag + */ + public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): + BatchMatrixSetDiag = java.batchMatrixSetDiag( + input, + diagonal + ) + + /** + * The BatchMatrixSolve operation + * + * @param data type for `output` output + * @param matrix The matrix value + * @param rhs The rhs value + * @param options carries optional attribute values + * @param data type for `BatchMatrixSolve` output and operands + * @return a new instance of BatchMatrixSolve + * @see org.tensorflow.op.LinalgOps.batchMatrixSolve + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public fun batchMatrixSolve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? = null + ): BatchMatrixSolve = java.batchMatrixSolve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + ).toTypedArray() + ) + + /** + * The BatchMatrixSolveLs operation + * + * @param data type for `output` output + * @param matrix The matrix value + * @param rhs The rhs value + * @param l2Regularizer The l2Regularizer value + * @param options carries optional attribute values + * @param data type for `BatchMatrixSolveLs` output and operands + * @return a new instance of BatchMatrixSolveLs + * @see org.tensorflow.op.LinalgOps.batchMatrixSolveLs + * @param fast Sets the fast option. + * + * @param fast the fast option + * @return this Options instance. 
+ */ + public fun batchMatrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + /** + * The BatchMatrixTriangularSolve operation + * + * @param data type for `output` output + * @param matrix The matrix value + * @param rhs The rhs value + * @param options carries optional attribute values + * @param data type for `BatchMatrixTriangularSolve` output and operands + * @return a new instance of BatchMatrixTriangularSolve + * @see org.tensorflow.op.LinalgOps.batchMatrixTriangularSolve + * @param lower Sets the lower option. + * + * @param lower the lower option + * @return this Options instance. + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public fun batchMatrixTriangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? = null + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + ).toTypedArray() + ) + + /** + * The BatchSelfAdjointEigV2 operation + * + * @param data type for `e` output + * @param input The input value + * @param options carries optional attribute values + * @param data type for `BatchSelfAdjointEigV2` output and operands + * @return a new instance of BatchSelfAdjointEig + * @see org.tensorflow.op.LinalgOps.batchSelfAdjointEig + * @param computeV Sets the computeV option. + * + * @param computeV the computeV option + * @return this Options instance. + */ + public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? 
= null): + BatchSelfAdjointEig = java.batchSelfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + /** + * The BatchSvd operation + * + * @param data type for `s` output + * @param input The input value + * @param options carries optional attribute values + * @param data type for `BatchSvd` output and operands + * @return a new instance of BatchSvd + * @see org.tensorflow.op.LinalgOps.batchSvd + * @param computeUv Sets the computeUv option. + * + * @param computeUv the computeUv option + * @return this Options instance. + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices the fullMatrices option + * @return this Options instance. + */ + public fun batchSvd( + input: Operand, + computeUv: Boolean? = null, + fullMatrices: Boolean? = null + ): BatchSvd = java.batchSvd( + input, + *listOfNotNull( + computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + ).toTypedArray() + ) + + /** + * Computes the Cholesky decomposition of one or more square matrices. + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be symmetric and positive definite. Only the lower-triangular + * part of the input will be used for this operation. The upper-triangular part + * will not be read. + * + * The output is a tensor of the same shape as the input + * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. + * + * **Note**: The gradient computation on GPU is faster for large matrices but + * not for large batch dimensions when the submatrices are small. In this + * case it might be faster to use the CPU. + * + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. 
+ * @param data type for `Cholesky` output and operands + * @return a new instance of Cholesky + * @see org.tensorflow.op.LinalgOps.cholesky + */ + public fun cholesky(input: Operand): Cholesky = java.cholesky( + input + ) + + /** + * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. + * For an explanation see "Differentiation of the Cholesky algorithm" by + * Iain Murray http://arxiv.org/abs/1602.07527. + * + * @param data type for `output` output + * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. + * Algorithm depends only on lower triangular part of the innermost matrices of + * this tensor. + * @param grad df/dl where f is some scalar function. Shape is `[..., M, M]`. + * Algorithm depends only on lower triangular part of the innermost matrices of + * this tensor. + * @param data type for `CholeskyGrad` output and operands + * @return a new instance of CholeskyGrad + * @see org.tensorflow.op.LinalgOps.choleskyGrad + */ + public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = + java.choleskyGrad( + l, + grad + ) + + /** + * Shuffle dimensions of x according to a permutation and conjugate the result. + * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], + * perm[t], perm[u]])` + * + * @param data type for `y` output + * @param x The x value + * @param perm The perm value + * @param data type for `ConjugateTranspose` output and operands + * @return a new instance of ConjugateTranspose + * @see org.tensorflow.op.LinalgOps.conjugateTranspose + */ + public fun conjugateTranspose(x: Operand, perm: Operand): + ConjugateTranspose = java.conjugateTranspose( + x, + perm + ) + + /** + * Compute the pairwise cross product. 
+ * `a` and `b` must be the same shape; they can either be simple 3-element vectors, + * or any shape where the innermost dimension is 3. In the latter case, each pair + * of corresponding 3-element vectors is cross-multiplied independently. + * + * @param data type for `product` output + * @param a A tensor containing 3-element vectors. + * @param b Another tensor, of same type and shape as `a`. + * @param data type for `Cross` output and operands + * @return a new instance of Cross + * @see org.tensorflow.op.LinalgOps.cross + */ + public fun cross(a: Operand, b: Operand): Cross = java.cross( + a, + b + ) + + /** + * Computes the determinant of one or more square matrices. + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor containing the determinants + * for all input submatrices `[..., :, :]`. + * + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param data type for `MatrixDeterminant` output and operands + * @return a new instance of Det + * @see org.tensorflow.op.LinalgOps.det + */ + public fun det(input: Operand): Det = java.det( + input + ) + + /** + * Computes the eigen decomposition of one or more square matrices. + * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = eig(a) + * e = eig(a, compute_v=False) + * + * ``` + * + * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. 
+ * @param Tout The value of the Tout attribute + * @param options carries optional attribute values + * @param data type for `Eig` output and operands + * @return a new instance of Eig + * @see org.tensorflow.op.LinalgOps.eig + * @param computeV Sets the computeV option. + * + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. + */ + public fun eig( + input: Operand, + Tout: Class, + computeV: Boolean? = null + ): Eig = java.eig( + input, + Tout, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } + ).toTypedArray() + ) + + /** + * Tensor contraction according to Einstein summation convention. + * Implements generalized Tensor contraction and reduction. Each input Tensor must + * have a corresponding input subscript appearing in the comma-separated left-hand + * side of the equation. The right-hand side of the equation consists of the + * output subscript. The input subscripts and the output subscript should consist + * of zero or more named axis labels and at most one ellipsis (`...`). + * + * The named axis labels may be any single character other than those having + * special meaning, namely `,.->`. The behavior of this Op is undefined if it + * receives an ill-formatted equation; since the validation is done at + * graph-building time, we omit format validation checks at runtime. + * + * Note: This Op is _not_ intended to be called by the user; instead users should + * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. + * + * Operations are applied to the input(s) according to the following rules: + * + * (a) Generalized Diagonals: For input dimensions corresponding to axis labels + * appearing more than once in the same input subscript, we take the + * generalized (`k`-dimensional) diagonal. 
+ * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the + * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, + * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. + * + * (b) Reduction: Axes corresponding to labels appearing only in one input + * subscript but not in the output subscript are summed over prior to Tensor + * contraction. + * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are + * the reduction axis labels. + * + * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the + * input subscripts and also in the output subscript make up the batch + * dimensions in Tensor contraction. Unnamed axis labels corresponding to + * ellipsis (`...`) also correspond to batch dimensions. + * For example, for the equation denoting batch matrix multiplication, + * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. + * + * (d) Contraction: In case of binary einsum, axes corresponding to labels + * appearing in two different inputs (and not in the output) are contracted + * against each other. + * Considering the batch matrix multiplication equation again + * (`bij,bjk->bik`), the contracted axis label is `j`. + * + * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis + * labels, the opposite operation of (a) is applied. For example, in the + * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` + * are all zeros, except for the (generalized) diagonal which is populated + * with values from the input. + * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is + * provided to enable computing the symbolic gradient of `tf.einsum`. + * + * The output subscripts must contain only labels appearing in at least one of the + * input subscripts. Furthermore, all dimensions mapping to the same axis label + * must be equal. 
+ * + * Any of the input and output subscripts may contain at most a single ellipsis + * (`...`). These ellipsis are mapped against dimensions not corresponding to any + * named axis label. If two inputs contain ellipsis, then they are broadcasted + * according to standard NumPy + * broadcasting[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . + * + * The broadcasted dimensions are placed in the corresponding location of the + * ellipsis in the output subscript. If the broadcasted dimensions are non-empty + * and the output subscripts do not contain ellipsis, then an InvalidArgument error + * is raised. + * + * `@`compatibility(numpy) + * + * Similar to + * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html) . + * + * Comparison with `numpy.einsum`: + *
+ * <ul>
+ * <li>This Op only supports unary and binary forms of `numpy.einsum`.</li>
+ * <li>This Op does not support implicit form. (i.e. equations without `->`).</li>
+ * <li>This Op also supports repeated indices in the output subscript, which is not
+ * supported by `numpy.einsum`.
+ *
+ * `@`end_compatibility</li>
+ * </ul>
+ * + * @param data type for `output` output + * @param inputs List of 1 or 2 Tensors. + * @param equation String describing the Einstein Summation operation; in the format of + * np.einsum. + * @param data type for `Einsum` output and operands + * @return a new instance of Einsum + * @see org.tensorflow.op.LinalgOps.einsum + */ + public fun einsum(inputs: Iterable>, equation: String): Einsum = + java.einsum( + inputs, + equation + ) + + /** + * Computes the euclidean norm of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `EuclideanNorm` output and operands + * @return a new instance of EuclideanNorm + * @see org.tensorflow.op.LinalgOps.euclideanNorm + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun euclideanNorm( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): EuclideanNorm = java.euclideanNorm( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the inverse of one or more square invertible matrices or their adjoints (conjugate + * transposes). + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor of the same shape as the input + * containing the inverse for all input submatrices `[..., :, :]`. 
+ * + * The op uses LU decomposition with partial pivoting to compute the inverses. + * + * If a matrix is not invertible there is no guarantee what the op does. It + * may detect the condition and raise an exception or it may simply return a + * garbage result. + * + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param options carries optional attribute values + * @param data type for `MatrixInverse` output and operands + * @return a new instance of Inv + * @see org.tensorflow.op.LinalgOps.inv + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. + */ + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + input, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } + ).toTypedArray() + ) + + /** + * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint + * at `ckpt_path` and potentially reorders its rows and columns using the + * specified remappings. + * + * Most users should use one of the wrapper initializers (such as + * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this + * function directly. + * + * The remappings are 1-D tensors with the following properties: + *
+ * <ul>
+ * <li>`row_remapping` must have exactly `num_rows` entries. Row `i` of the output
+ * matrix will be initialized from the row corresponding to index
+ * `row_remapping[i]` in the old `Tensor` from the checkpoint.</li>
+ * <li>`col_remapping` must have either 0 entries (indicating that no column
+ * reordering is needed) or `num_cols` entries. If specified, column `j` of the
+ * output matrix will be initialized from the column corresponding to index
+ * `col_remapping[j]` in the old `Tensor` from the checkpoint.</li>
+ * <li>A value of -1 in either of the remappings signifies a "missing" entry. In that
+ * case, values from the `initializing_values` tensor will be used to fill that
+ * missing row or column. If `row_remapping` has `r` missing entries and
+ * `col_remapping` has `c` missing entries, then the following condition must be
+ * true:</li>
+ * </ul>
+ * + * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` + * + * The remapping tensors can be generated using the GenerateVocabRemapping op. + * + * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], + * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing + * the value from row i, column j of the old tensor in the checkpoint, the output + * matrix will look like the following: + * + * [[w(1, 0), w(1, 2), 0.5], + * [w(0, 0), w(0, 2), -0.5], + * [0.25, -0.25, 42]] + * + * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from + * which the old matrix `Tensor` will be loaded. + * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. + * @param rowRemapping An int `Tensor` of row remappings (generally created by + * `generate_vocab_remapping`). Even if no row remapping is needed, this must + * still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted + * index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). + * @param colRemapping An int `Tensor` of column remappings (generally created by + * `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping + * is to be done (e.g. column ordering is the same). + * @param initializingValues A float `Tensor` containing values to fill in for cells + * in the output matrix that are not loaded from the checkpoint. Length must be + * exactly the same as the number of missing / new cells. + * @param numRows Number of rows (length of the 1st dimension) in the output matrix. + * @param numCols Number of columns (length of the 2nd dimension) in the output matrix. + * @param options carries optional attribute values + * @return a new instance of LoadAndRemapMatrix + * @see org.tensorflow.op.LinalgOps.loadAndRemapMatrix + * @param maxRowsInMemory Sets the maxRowsInMemory option. 
+ * + * @param maxRowsInMemory The maximum number of rows to load from the checkpoint at + * once. If less than or equal to 0, the entire matrix will be loaded into + * memory. Setting this arg trades increased disk reads for lower memory usage. + * @return this Options instance. + */ + public fun loadAndRemapMatrix( + ckptPath: Operand, + oldTensorName: Operand, + rowRemapping: Operand, + colRemapping: Operand, + initializingValues: Operand, + numRows: Long, + numCols: Long, + maxRowsInMemory: Long? = null + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ckptPath, + oldTensorName, + rowRemapping, + colRemapping, + initializingValues, + numRows, + numCols, + *listOfNotNull( + maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + ).toTypedArray() + ) + + /** + * Computes the sign and the log of the absolute value of the determinant of + * one or more square matrices. + * + * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions + * form square matrices. The outputs are two tensors containing the signs and + * absolute values of the log determinants for all N input submatrices + * `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. + * The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` + * is the `LU` decomposition of the input and `P` is the corresponding + * permutation matrix. + * + * @param data type for `sign` output + * @param input Shape is `[N, M, M]`. + * @param data type for `LogMatrixDeterminant` output and operands + * @return a new instance of LogMatrixDeterminant + * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant + */ + public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = + java.logMatrixDeterminant( + input + ) + + /** + * Computes the LU decomposition of one or more square matrices. + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. 
+ * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. + * @param data type for `Lu` output and operands + * @return a new instance of Lu, with default output types + * @see org.tensorflow.op.LinalgOps.lu + */ + public fun lu(input: Operand): Lu = java.lu( + input + ) + + /** + * Computes the LU decomposition of one or more square matrices. + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. 
+ * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. + * @param outputIdxType The value of the outputIdxType attribute + * @param data type for `Lu` output and operands + * @param data type for `Lu` output and operands + * @return a new instance of Lu + * @see org.tensorflow.op.LinalgOps.lu + */ + public fun lu(input: Operand, outputIdxType: Class): Lu = + java.lu( + input, + outputIdxType + ) + + /** + * Multiply the matrix "a" by the matrix "b". + * The inputs must be two-dimensional matrices and the inner dimension of + * "a" (after being transposed if transpose_a is true) must match the + * outer dimension of "b" (after being transposed if transposed_b is + * true). + * + * _Note_: The default kernel implementation for MatMul on GPUs uses + * cublas. + * + * @param data type for `product` output + * @param a The a value + * @param b The b value + * @param options carries optional attribute values + * @param data type for `MatMul` output and operands + * @return a new instance of MatMul + * @see org.tensorflow.op.LinalgOps.matMul + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, "a" is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, "b" is transposed before multiplication. + * @return this Options instance. + */ + public fun matMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? 
= null + ): MatMul = java.matMul( + a, + b, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } + ).toTypedArray() + ) + + /** + * Returns a batched diagonal tensor with given batched diagonal values. + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If + * both are not specified, the op assumes the innermost matrix is square and infers + * its size from `k` and the innermost dimension of `diagonal`. If only one of them + * is specified, the op assumes the unspecified value is the smallest possible + * based on other criteria. + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + * + * The second innermost dimension of `diagonal` has double meaning. + * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * padding_value ; otherwise + * + * ``` + * + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * padding_value ; otherwise + * + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + * + * For example: + * ``` + * # The main diagonal. 
+ * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] + * + * # A superdiagonal (per batch). + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_diag(diagonal, k = 1) + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] + * + * # A band of diagonals. + * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + * [4, 5, 0]], + * [[6, 7, 9], + * [9, 1, 0]]]) + * tf.matrix_diag(diagonals, k = (-1, 0)) + * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + * [4, 2, 0], + * [0, 5, 3]], + * [[6, 0, 0], + * [9, 7, 0], + * [0, 1, 9]]] + * + * # Rectangular matrix. + * diagonal = np.array([1, 2]) # Input shape: (2) + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] + * + * # Rectangular matrix with inferred num_cols and padding_value = 9. + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] + * + * ``` + * + * @param data type for `output` output + * @param diagonal Rank `r`, where `r >= 1` + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes + * the output matrix is a square matrix and infers the matrix size from k and the + * innermost dimension of `diagonal`. 
+ * @param numCols The number of columns of the output matrix. If it is not provided, the op + * assumes the output matrix is a square matrix and infers the matrix size from + * k and the innermost dimension of `diagonal`. + * @param paddingValue The number to fill the area outside the specified diagonal band with. + * Default is 0. + * @param data type for `MatrixDiagV2` output and operands + * @return a new instance of MatrixDiag + * @see org.tensorflow.op.LinalgOps.matrixDiag + */ + public fun matrixDiag( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand + ): MatrixDiag = java.matrixDiag( + diagonal, + k, + numRows, + numCols, + paddingValue + ) + + /** + * Returns the batched diagonal part of a batched tensor. + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. + * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * + * ``` + * + * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + * + * The input must be at least a matrix. 
+ * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) + * + * # A main diagonal from each batch. + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] + * + * # A superdiagonal from each batch. + * tf.matrix_diag_part(input, k = 1) + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] + * + * # A tridiagonal band from each batch. + * tf.matrix_diag_part(input, k = (-1, 1)) + * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + * [1, 6, 7], + * [5, 8, 0]], + * [[4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] + * + * # Padding value = 9 + * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + * [3, 8, 9], + * [2, 7, 6]], + * [[2, 9, 9], + * [3, 4, 9], + * [4, 3, 8]]] + * + * ``` + * + * @param data type for `diagonal` output + * @param input Rank `r` tensor where `r >= 2`. + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param paddingValue The value to fill the area outside the specified diagonal band with. + * Default is 0. + * @param data type for `MatrixDiagPartV2` output and operands + * @return a new instance of MatrixDiagPart + * @see org.tensorflow.op.LinalgOps.matrixDiagPart + */ + public fun matrixDiagPart( + input: Operand, + k: Operand, + paddingValue: Operand + ): MatrixDiagPart = java.matrixDiagPart( + input, + k, + paddingValue + ) + + /** + * Returns the batched diagonal part of a batched tensor. + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. 
+ * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. + * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * + * ``` + * + * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * } + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * The input must be at least a matrix. + * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) + * + * # A main diagonal from each batch. + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] + * + * # A superdiagonal from each batch. + * tf.matrix_diag_part(input, k = 1) + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] + * + * # A band from each batch. 
+ * tf.matrix_diag_part(input, k = (-1, 2)) + * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [5, 8, 0]], + * [[0, 3, 4], + * [4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] + * + * # LEFT_RIGHT alignment. + * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [0, 5, 8]], + * [[3, 4, 0], + * [4, 3, 8], + * [5, 2, 7], + * [0, 1, 6]]] + * + * # max_diag_len can be shorter than the main diagonal. + * tf.matrix_diag_part(input, k = (-2, -1)) + * ==> [[[5, 8], + * [9, 0]], + * [[1, 6], + * [5, 0]]] + * + * # padding_value = 9 + * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + * [9, 3, 8], + * [2, 7, 6]], + * [[9, 9, 2], + * [9, 3, 4], + * [4, 3, 8]]] + * + * + * ``` + * + * @param data type for `diagonal` output + * @param input Rank `r` tensor where `r >= 2`. + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param paddingValue The value to fill the area outside the specified diagonal band with. + * Default is 0. + * @param options carries optional attribute values + * @param data type for `MatrixDiagPartV3` output and operands + * @return a new instance of MatrixDiagPartV3 + * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". 
+ * "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + * @return this Options instance. + */ + public fun matrixDiagPartV3( + input: Operand, + k: Operand, + paddingValue: Operand, + align: String? = null + ): MatrixDiagPartV3 = java.matrixDiagPartV3( + input, + k, + paddingValue, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + ).toTypedArray() + ) + + /** + * Returns a batched diagonal tensor with given batched diagonal values. + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If + * both are not specified, the op assumes the innermost matrix is square and infers + * its size from `k` and the innermost dimension of `diagonal`. If only one of them + * is specified, the op assumes the unspecified value is the smallest possible + * based on other criteria. + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + * + * The second innermost dimension of `diagonal` has double meaning. 
+ * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * padding_value ; otherwise + * + * ``` + * + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * padding_value ; otherwise + * + * ``` + * + * where `d = n - m`, `diag_index = [k] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * } + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] + * + * # A superdiagonal (per batch). + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_diag(diagonal, k = 1) + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] + * + * # A tridiagonal band (per batch). 
+ * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 2, 3], + * [6, 7, 9], + * [9, 1, 0]]]) + * tf.matrix_diag(diagonals, k = (-1, 1)) + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] + * + * # LEFT_RIGHT alignment. + * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [0, 4, 5]], + * [[2, 3, 0], + * [6, 7, 9], + * [0, 9, 1]]]) + * tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] + * + * # Rectangular matrix. + * diagonal = np.array([1, 2]) # Input shape: (2) + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] + * + * # Rectangular matrix with inferred num_cols and padding_value = 9. + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] + * + * + * ``` + * + * @param data type for `output` output + * @param diagonal Rank `r`, where `r >= 1` + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes + * the output matrix is a square matrix and infers the matrix size from k and the + * innermost dimension of `diagonal`. + * @param numCols The number of columns of the output matrix. If it is not provided, the op + * assumes the output matrix is a square matrix and infers the matrix size from + * k and the innermost dimension of `diagonal`. 
+ * @param paddingValue The number to fill the area outside the specified diagonal band with. + * Default is 0. + * @param options carries optional attribute values + * @param data type for `MatrixDiagV3` output and operands + * @return a new instance of MatrixDiagV3 + * @see org.tensorflow.op.LinalgOps.matrixDiagV3 + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". + * "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + * @return this Options instance. + */ + public fun matrixDiagV3( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand, + align: String? = null + ): MatrixDiagV3 = java.matrixDiagV3( + diagonal, + k, + numRows, + numCols, + paddingValue, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + ).toTypedArray() + ) + + /** + * Returns a batched matrix tensor with new batched diagonal values. + * Given `input` and `diagonal`, this operation returns a tensor with the + * same shape and values as `input`, except for the specified diagonals of the + * innermost matrices. These will be overwritten by the values in `diagonal`. + * + * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. 
+ * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * + * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. + * If `k` is scalar or `k[0] == k[1]`: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + * input[i, j, ..., l, m, n] ; otherwise + * + * ``` + * + * Otherwise, + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * input[i, j, ..., l, m, n] ; otherwise + * + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * } + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + * [7, 7, 7, 7], + * [7, 7, 7, 7]], + * [[7, 7, 7, 7], + * [7, 7, 7, 7], + * [7, 7, 7, 7]]]) + * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_set_diag(input, diagonal) + * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + * [7, 2, 7, 7], + * [7, 7, 3, 7]], + * [[4, 7, 7, 7], + * [7, 5, 7, 7], + * [7, 7, 6, 7]]] + * + * # A superdiagonal (per batch). + * tf.matrix_set_diag(input, diagonal, k = 1) + * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + * [7, 7, 2, 7], + * [7, 7, 7, 3]], + * [[7, 4, 7, 7], + * [7, 7, 5, 7], + * [7, 7, 7, 6]]] + * + * # A band of diagonals. 
+ * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 1, 2], + * [5, 6, 4], + * [6, 1, 2], + * [3, 4, 0]]]) + * tf.matrix_set_diag(input, diagonals, k = (-1, 2)) + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] + * + * # LEFT_RIGHT alignment. + * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [0, 4, 5]], + * [[1, 2, 0], + * [5, 6, 4], + * [6, 1, 2], + * [0, 3, 4]]]) + * tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] + * + * + * ``` + * + * @param data type for `output` output + * @param input Rank `r+1`, where `r >= 1`. + * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank + * `r+1`. + * `k >= 1`. + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param options carries optional attribute values + * @param data type for `MatrixSetDiagV3` output and operands + * @return a new instance of MatrixSetDiag + * @see org.tensorflow.op.LinalgOps.matrixSetDiag + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". 
+ * "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + * @return this Options instance. + */ + public fun matrixSetDiag( + input: Operand, + diagonal: Operand, + k: Operand, + align: String? = null + ): MatrixSetDiag = java.matrixSetDiag( + input, + diagonal, + k, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + ).toTypedArray() + ) + + /** + * Solves one or more linear least-squares problems. + * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same + * type as `matrix` and shape `[..., M, K]`. + * The output is a tensor shape `[..., N, K]` where each output matrix solves + * each of the equations + * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` + * in the least squares sense. + * + * We use the following notation for (complex) matrix and right-hand sides + * in the batch: + * + * `matrix`=`\(A \in \mathbb{C}^{m \times n}\)`, + * `rhs`=`\(B \in \mathbb{C}^{m \times k}\)`, + * `output`=`\(X \in \mathbb{C}^{n \times k}\)`, + * `l2_regularizer`=`\(\lambda \in \mathbb{R}\)`. + * + * If `fast` is `True`, then the solution is computed by solving the normal + * equations using Cholesky decomposition. Specifically, if `\(m \ge n\)` then + * `\(X = (A^H A + \lambda I)^{-1} A^H B\)`, which solves the least-squares + * problem `\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda + * ||Z||_F^2\)`. + * If `\(m \lt n\)` then `output` is computed as + * `\(X = A^H (A A^H + \lambda I)^{-1} B\)`, which (for `\(\lambda = 0\)`) is the + * minimum-norm solution to the under-determined linear system, i.e. + * `\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \)`, + * subject to `\(A Z = B\)`. 
Notice that the fast path is only numerically stable + * when `\(A\)` is numerically full rank and has a condition number + * `\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\)` or `\(\lambda\)` is + * sufficiently large. + * + * If `fast` is `False` an algorithm based on the numerically robust complete + * orthogonal decomposition is used. This computes the minimum-norm + * least-squares solution, even when `\(A\)` is rank deficient. This path is + * typically 6-7 times slower than the fast path. If `fast` is `False` then + * `l2_regularizer` is ignored. + * + * @param data type for `output` output + * @param matrix Shape is `[..., M, N]`. + * @param rhs Shape is `[..., M, K]`. + * @param l2Regularizer Scalar tensor. + * + * `@`compatibility(numpy) + * + * Equivalent to np.linalg.lstsq + * + * `@`end_compatibility + * @param options carries optional attribute values + * @param data type for `MatrixSolveLs` output and operands + * @return a new instance of MatrixSolveLs + * @see org.tensorflow.op.LinalgOps.matrixSolveLs + * @param fast Sets the fast option. + * + * @param fast the fast option + * @return this Options instance. + */ + public fun matrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): MatrixSolveLs = java.matrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + /** + * Computes the QR decompositions of one or more matrices. + * Computes the QR decomposition of each inner matrix in `tensor` such that + * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` + * + * Currently, the gradient for the QR decomposition is well-defined only when + * the first `P` columns of the inner matrix are linearly independent, where + * `P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`. + * ``` + * # a is a tensor. + * # q is a tensor of orthonormal matrices. 
+ * # r is a tensor of upper triangular matrices. + * q, r = qr(a) + * q_full, r_full = qr(a, full_matrices=True) + * + * ``` + * + * @param data type for `q` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. + * @param options carries optional attribute values + * @param data type for `Qr` output and operands + * @return a new instance of Qr + * @see org.tensorflow.op.LinalgOps.qr + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices If true, compute full-sized `q` and `r`. If false + * (the default), compute only the leading `P` columns of `q`. + * @return this Options instance. + */ + public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = + java.qr( + input, + *listOfNotNull( + fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() + ) + + /** + * Perform a quantized matrix multiplication of `a` by the matrix `b`. + * The inputs must be two-dimensional matrices and the inner dimension of + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is + * non-zero). + * + * @param data type for `out` output + * @param a Must be a two-dimensional tensor. + * @param b Must be a two-dimensional tensor. + * @param minA The float value that the lowest quantized `a` value represents. + * @param maxA The float value that the highest quantized `a` value represents. + * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. + * @param Toutput The value of the Toutput attribute + * @param Tactivation The type of output produced by activation function + * following this operation. 
+ * @param options carries optional attribute values + * @param data type for `QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands + * @return a new instance of QuantizedMatMul + * @see org.tensorflow.op.LinalgOps.quantizedMatMul + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, `a` is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, `b` is transposed before multiplication. + * @return this Options instance. + */ + public fun quantizedMatMul( + a: Operand, + b: Operand, + minA: Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + Toutput: Class, + Tactivation: Class, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): QuantizedMatMul = java.quantizedMatMul( + a, + b, + minA, + maxA, + minB, + maxB, + Toutput, + Tactivation, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + ).toTypedArray() + ) + + /** + * Computes the eigen decomposition of one or more square self-adjoint matrices. + * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = self_adjoint_eig(a) + * e = self_adjoint_eig(a, compute_v=False) + * + * ``` + * + * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. + * @param options carries optional attribute values + * @param data type for `SelfAdjointEigV2` output and operands + * @return a new instance of SelfAdjointEig + * @see org.tensorflow.op.LinalgOps.selfAdjointEig + * @param computeV Sets the computeV option. 
+ * + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. + */ + public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): + SelfAdjointEig = java.selfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + /** + * Solves systems of linear equations. + * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is + * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix + * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `True` then each output matrix satisfies + * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. + * + * @param data type for `output` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. + * @param options carries optional attribute values + * @param data type for `MatrixSolve` output and operands + * @return a new instance of Solve + * @see org.tensorflow.op.LinalgOps.solve + * @param adjoint Sets the adjoint option. + * + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) + * adjoint. + * @return this Options instance. + */ + public fun solve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? = null + ): Solve = java.solve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } + ).toTypedArray() + ) + + /** + * Computes the matrix square root of one or more square matrices: + * matmul(sqrtm(A), sqrtm(A)) = A + * + * The input matrix should be invertible. If the input matrix is real, it should + * have no eigenvalues which are real and negative (pairs of complex conjugate + * eigenvalues are allowed). 
+ * + * The matrix square root is computed by first reducing the matrix to + * quasi-triangular form with the real Schur decomposition. The square root + * of the quasi-triangular matrix is then computed directly. Details of + * the algorithm can be found in: Nicholas J. Higham, "Computing real + * square roots of a real matrix", Linear Algebra Appl., 1987. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor of the same shape as the input + * containing the matrix square root for all input submatrices `[..., :, :]`. + * + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param data type for `MatrixSquareRoot` output and operands + * @return a new instance of Sqrtm + * @see org.tensorflow.op.LinalgOps.sqrtm + */ + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + input + ) + + /** + * Computes the singular value decompositions of one or more matrices. + * Computes the SVD of each inner matrix in `input` such that + * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * + * transpose(v[..., :, :])` + * ``` + * # a is a tensor containing a batch of matrices. + * # s is a tensor of singular values for each matrix. + * # u is the tensor containing the left singular vectors for each matrix. + * # v is the tensor containing the right singular vectors for each matrix. + * s, u, v = svd(a) + * s, _, _ = svd(a, compute_uv=False) + * + * ``` + * + * @param data type for `s` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. + * @param options carries optional attribute values + * @param data type for `Svd` output and operands + * @return a new instance of Svd + * @see org.tensorflow.op.LinalgOps.svd + * @param computeUv Sets the computeUv option. 
+ * + * @param computeUv If true, left and right singular vectors will be + * computed and returned in `u` and `v`, respectively. + * If false, `u` and `v` are not set and should never referenced. + * @return this Options instance. + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices If true, compute full-sized `u` and `v`. If false + * (the default), compute only the leading `P` singular vectors. + * Ignored if `compute_uv` is `False`. + * @return this Options instance. + */ + public fun svd( + input: Operand, + computeUv: Boolean? = null, + fullMatrices: Boolean? = null + ): Svd = java.svd( + input, + *listOfNotNull( + computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } + ).toTypedArray() + ) + + /** + * Returns a diagonal tensor with a given diagonal values. + * Given a `diagonal`, this operation returns a tensor with the `diagonal` and + * everything else padded with zeros. The diagonal is computed as follows: + * + * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + * + * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere + * else. + * + * For example: + * ``` + * # 'diagonal' is [1, 2, 3, 4] + * tf.diag(diagonal) ==> [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] + * + * ``` + * + * @param data type for `output` output + * @param diagonal Rank k tensor where k is at most 1. + * @param data type for `Diag` output and operands + * @return a new instance of TensorDiag + * @see org.tensorflow.op.LinalgOps.tensorDiag + */ + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + diagonal + ) + + /** + * Returns the diagonal part of the tensor. + * This operation returns a tensor with the `diagonal` part + * of the `input`. 
The `diagonal` part is computed as follows: + * + * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + * tensor of rank `k` with dimensions `[D1,..., Dk]` where: + * + * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. + * + * For example: + * ``` + * # 'input' is [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] + * + * tf.diag_part(input) ==> [1, 2, 3, 4] + * + * ``` + * + * @param data type for `diagonal` output + * @param input Rank k tensor where k is even and not zero. + * @param data type for `DiagPart` output and operands + * @return a new instance of TensorDiagPart + * @see org.tensorflow.op.LinalgOps.tensorDiagPart + */ + public fun tensorDiagPart(input: Operand): TensorDiagPart = + java.tensorDiagPart( + input + ) + + /** + * Shuffle dimensions of x according to a permutation. + * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + * + * @param data type for `y` output + * @param x The x value + * @param perm The perm value + * @param data type for `Transpose` output and operands + * @return a new instance of Transpose + * @see org.tensorflow.op.LinalgOps.transpose + */ + public fun transpose(x: Operand, perm: Operand): Transpose = + java.transpose( + x, + perm + ) + + /** + * Solves systems of linear equations with upper or lower triangular matrices by + * backsubstitution. + * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * square matrices. If `lower` is `True` then the strictly upper triangular part + * of each inner-most matrix is assumed to be zero and not accessed. + * If `lower` is False then the strictly lower triangular part of each inner-most + * matrix is assumed to be zero and not accessed. + * `rhs` is a tensor of shape `[..., M, N]`. + * + * The output is a tensor of shape `[..., M, N]`. 
If `adjoint` is + * `True` then the innermost matrices in `output` satisfy matrix equations + * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `False` then the strictly then the innermost matrices in + * `output` satisfy matrix equations + * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. + * + * Note, the batch shapes for the inputs only need to broadcast. + * + * Example: + * ``` + * a = tf.constant([[3, 0, 0, 0], + * [2, 1, 0, 0], + * [1, 0, 1, 0], + * [1, 1, 1, 1]], dtype=tf.float32) + * + * b = tf.constant([[4], + * [2], + * [4], + * [2]], dtype=tf.float32) + * + * x = tf.linalg.triangular_solve(a, b, lower=True) + * x + * # + * + * # in python3 one can use `a@x` + * tf.matmul(a, x) + * # + * + * ``` + * + * @param data type for `output` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. + * @param options carries optional attribute values + * @param data type for `MatrixTriangularSolve` output and operands + * @return a new instance of TriangularSolve + * @see org.tensorflow.op.LinalgOps.triangularSolve + * @param lower Sets the lower option. + * + * @param lower Boolean indicating whether the innermost matrices in `matrix` are + * lower or upper triangular. + * @return this Options instance. + * @param adjoint Sets the adjoint option. + * + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) + * adjoint. + * + * `@`compatibility(numpy) + * + * Equivalent to scipy.linalg.solve_triangular + * + * `@`end_compatibility + * @return this Options instance. + */ + public fun triangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? 
= null + ): TriangularSolve = java.triangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + ).toTypedArray() + ) + + /** + * Computes the eigen decomposition of one or more square matrices. + * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = eig(a) + * e = eig(a, compute_v=False) + * + * ``` + * + * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. + * @param Tout The value of the Tout attribute + * @param options carries optional attribute values + * @param data type for `Eig` output and operands + * @return a new instance of Eig + * @see org.tensorflow.op.LinalgOps.eig + * @param computeV Sets the computeV option. + * + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. + */ + @JvmName("eigReified") + public inline fun eig(input: Operand, computeV: Boolean? = null): + Eig = eig(input, U::class.java, computeV) + + /** + * Computes the LU decomposition of one or more square matrices. + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. 
+ * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. + * @param outputIdxType The value of the outputIdxType attribute + * @param data type for `Lu` output and operands + * @param data type for `Lu` output and operands + * @return a new instance of Lu + * @see org.tensorflow.op.LinalgOps.lu + */ + @JvmName("luReified") + public inline fun luTyped(input: Operand): Lu = lu(input, U::class.java) + + /** + * Perform a quantized matrix multiplication of `a` by the matrix `b`. + * The inputs must be two-dimensional matrices and the inner dimension of + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is + * non-zero). + * + * @param data type for `out` output + * @param a Must be a two-dimensional tensor. + * @param b Must be a two-dimensional tensor. + * @param minA The float value that the lowest quantized `a` value represents. + * @param maxA The float value that the highest quantized `a` value represents. + * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. 
+ * @param Toutput The value of the Toutput attribute + * @param Tactivation The type of output produced by activation function + * following this operation. + * @param options carries optional attribute values + * @param data type for `QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands + * @return a new instance of QuantizedMatMul + * @see org.tensorflow.op.LinalgOps.quantizedMatMul + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, `a` is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, `b` is transposed before multiplication. + * @return this Options instance. + */ + @JvmName("quantizedMatMulReified") + public inline fun quantizedMatMul( + a: Operand, + b: Operand, + minA: Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, + W::class.java, transposeA, transposeB) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt new file mode 100644 index 00000000000..c41608c9fa0 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -0,0 +1,3034 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Float +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.math.Abs +import org.tensorflow.op.math.AccumulateN +import org.tensorflow.op.math.Acos +import org.tensorflow.op.math.Acosh +import org.tensorflow.op.math.Add +import org.tensorflow.op.math.AddN +import org.tensorflow.op.math.Angle +import org.tensorflow.op.math.ApproximateEqual +import org.tensorflow.op.math.ArgMax +import org.tensorflow.op.math.ArgMin +import org.tensorflow.op.math.Asin +import org.tensorflow.op.math.Asinh +import org.tensorflow.op.math.Atan +import org.tensorflow.op.math.Atan2 +import org.tensorflow.op.math.Atanh +import org.tensorflow.op.math.Betainc +import org.tensorflow.op.math.Bincount +import org.tensorflow.op.math.Ceil +import org.tensorflow.op.math.ComplexAbs +import org.tensorflow.op.math.Conj +import org.tensorflow.op.math.Cos +import org.tensorflow.op.math.Cosh +import org.tensorflow.op.math.Cumprod +import org.tensorflow.op.math.Cumsum +import org.tensorflow.op.math.DenseBincount +import org.tensorflow.op.math.Digamma +import org.tensorflow.op.math.Div +import org.tensorflow.op.math.DivNoNan +import org.tensorflow.op.math.Equal +import org.tensorflow.op.math.Erf +import org.tensorflow.op.math.Erfc +import org.tensorflow.op.math.Exp +import 
org.tensorflow.op.math.Expm1 +import org.tensorflow.op.math.Fact +import org.tensorflow.op.math.Floor +import org.tensorflow.op.math.FloorDiv +import org.tensorflow.op.math.FloorMod +import org.tensorflow.op.math.Greater +import org.tensorflow.op.math.GreaterEqual +import org.tensorflow.op.math.Igamma +import org.tensorflow.op.math.Igammac +import org.tensorflow.op.math.Imag +import org.tensorflow.op.math.InvertPermutation +import org.tensorflow.op.math.IsFinite +import org.tensorflow.op.math.IsInf +import org.tensorflow.op.math.IsNan +import org.tensorflow.op.math.Less +import org.tensorflow.op.math.LessEqual +import org.tensorflow.op.math.Lgamma +import org.tensorflow.op.math.Log +import org.tensorflow.op.math.Log1p +import org.tensorflow.op.math.LogicalAnd +import org.tensorflow.op.math.LogicalNot +import org.tensorflow.op.math.LogicalOr +import org.tensorflow.op.math.Maximum +import org.tensorflow.op.math.Mean +import org.tensorflow.op.math.Minimum +import org.tensorflow.op.math.Mod +import org.tensorflow.op.math.Mul +import org.tensorflow.op.math.MulNoNan +import org.tensorflow.op.math.Ndtri +import org.tensorflow.op.math.Neg +import org.tensorflow.op.math.NextAfter +import org.tensorflow.op.math.NotEqual +import org.tensorflow.op.math.Polygamma +import org.tensorflow.op.math.PopulationCount +import org.tensorflow.op.math.Pow +import org.tensorflow.op.math.QuantizedAdd +import org.tensorflow.op.math.QuantizedMul +import org.tensorflow.op.math.Real +import org.tensorflow.op.math.RealDiv +import org.tensorflow.op.math.Reciprocal +import org.tensorflow.op.math.Rint +import org.tensorflow.op.math.Round +import org.tensorflow.op.math.Rsqrt +import org.tensorflow.op.math.SegmentMax +import org.tensorflow.op.math.SegmentMean +import org.tensorflow.op.math.SegmentMin +import org.tensorflow.op.math.SegmentProd +import org.tensorflow.op.math.SegmentSum +import org.tensorflow.op.math.Sigmoid +import org.tensorflow.op.math.Sign +import org.tensorflow.op.math.Sin +import 
org.tensorflow.op.math.Sinh +import org.tensorflow.op.math.Softplus +import org.tensorflow.op.math.Sqrt +import org.tensorflow.op.math.Square +import org.tensorflow.op.math.SquaredDifference +import org.tensorflow.op.math.Sub +import org.tensorflow.op.math.Tan +import org.tensorflow.op.math.Tanh +import org.tensorflow.op.math.TruncateDiv +import org.tensorflow.op.math.TruncateMod +import org.tensorflow.op.math.UnsortedSegmentMax +import org.tensorflow.op.math.UnsortedSegmentMin +import org.tensorflow.op.math.UnsortedSegmentProd +import org.tensorflow.op.math.UnsortedSegmentSum +import org.tensorflow.op.math.Xdivy +import org.tensorflow.op.math.Xlog1py +import org.tensorflow.op.math.Xlogy +import org.tensorflow.op.math.Zeta +import org.tensorflow.op.math.erfinv +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `math` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class MathOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.MathOps = ops.java.math + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Computes the absolute value of a tensor. + * Given a tensor `x`, this operation returns a tensor containing the absolute + * value of each element in `x`. For example, if x is an input element and y is + * an output element, this operation computes `\(y = |x|\)`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Abs` output and operands + * @return a new instance of Abs + * @see org.tensorflow.op.MathOps.abs + */ + public fun abs(x: Operand): Abs = java.abs( + x + ) + + /** + * Returns the element-wise sum of a list of tensors. 
+ * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + * wait for all of its inputs to be ready before beginning to sum. This can + * save memory if inputs are ready at different times, since minimum temporary + * storage is proportional to the output size rather than the inputs size. + * + * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + * + * Returns a `Tensor` of same shape and type as the elements of `inputs`. + * + * @param data type for `sum` output + * @param inputs A list of `Tensor` objects, each with same shape and type. + * @param shape Shape of elements of `inputs`. + * @param data type for `AccumulateNV2` output and operands + * @return a new instance of AccumulateN + * @see org.tensorflow.op.MathOps.accumulateN + */ + public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = + java.accumulateN( + inputs, + shape + ) + + /** + * Computes acos of x element-wise. + * Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each + * element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + * + * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Acos` output and operands + * @return a new instance of Acos + * @see org.tensorflow.op.MathOps.acos + */ + public fun acos(x: Operand): Acos = java.acos( + x + ) + + /** + * Computes inverse hyperbolic cosine of x element-wise. + * Given an input tensor, the function computes inverse hyperbolic cosine of every element. + * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. + * ``` + * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.acosh(x) ==> [nan nan 0. 
0.62236255 5.9914584 9.903487 inf] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Acosh` output and operands + * @return a new instance of Acosh + * @see org.tensorflow.op.MathOps.acosh + */ + public fun acosh(x: Operand): Acosh = java.acosh( + x + ) + + /** + * Returns x + y element-wise. + * _NOTE_: `math.Add` supports broadcasting. `AddN` does not. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Given two input tensors, the `tf.add` operation computes the sum for every element in the + * tensor. + * + * Both input and output have a range `(-inf, inf)`. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Add` output and operands + * @return a new instance of Add + * @see org.tensorflow.op.MathOps.add + */ + public fun add(x: Operand, y: Operand): Add = java.add( + x, + y + ) + + /** + * Add all input tensors element wise. + * Inputs must be of same size and shape. + * ``` + * x = [9, 7, 10] + * tf.math.add_n(x) ==> 26 + * + * ``` + * + * @param data type for `sum` output + * @param inputs The inputs value + * @param data type for `AddN` output and operands + * @return a new instance of AddN + * @see org.tensorflow.op.MathOps.addN + */ + public fun addN(inputs: Iterable>): AddN = java.addN( + inputs + ) + + /** + * Returns the argument of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. + * + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. 
+ * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * + * ``` + * + * `@`compatibility(numpy) + * + * Equivalent to np.angle. + * + * `@`end_compatibility + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of Angle, with default output types + * @see org.tensorflow.op.MathOps.angle + */ + public fun angle(input: Operand): Angle = java.angle( + input + ) + + /** + * Returns the argument of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. + * + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * + * ``` + * + * `@`compatibility(numpy) + * + * Equivalent to np.angle. + * + * `@`end_compatibility + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Angle` output and operands + * @return a new instance of Angle + * @see org.tensorflow.op.MathOps.angle + */ + public fun angle(input: Operand, Tout: Class): Angle = + java.angle( + input, + Tout + ) + + /** + * Returns the truth value of abs(x-y) < tolerance element-wise. + * + * @param x The x value + * @param y The y value + * @param options carries optional attribute values + * @param data type for `ApproximateEqual` output and operands + * @return a new instance of ApproximateEqual + * @see org.tensorflow.op.MathOps.approximateEqual + * @param tolerance Sets the tolerance option. + * + * @param tolerance the tolerance option + * @return this Options instance. 
+ */ + public fun approximateEqual( + x: Operand, + y: Operand, + tolerance: Float? = null + ): ApproximateEqual = java.approximateEqual( + x, + y, + *listOfNotNull( + tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + ).toTypedArray() + ) + + /** + * Returns the index with the largest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @return a new instance of ArgMax, with default output types + * @see org.tensorflow.op.MathOps.argMax + */ + public fun argMax(input: Operand, dimension: Operand): ArgMax = + java.argMax( + input, + dimension + ) + + /** + * Returns the index with the largest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. 
+ * @param outputType The value of the outputType attribute + * @param data type for `ArgMax` output and operands + * @return a new instance of ArgMax + * @see org.tensorflow.op.MathOps.argMax + */ + public fun argMax( + input: Operand, + dimension: Operand, + outputType: Class + ): ArgMax = java.argMax( + input, + dimension, + outputType + ) + + /** + * Returns the index with the smallest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @return a new instance of ArgMin, with default output types + * @see org.tensorflow.op.MathOps.argMin + */ + public fun argMin(input: Operand, dimension: Operand): ArgMin = + java.argMin( + input, + dimension + ) + + /** + * Returns the index with the smallest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. 
+ * @param outputType The value of the outputType attribute + * @param data type for `ArgMin` output and operands + * @return a new instance of ArgMin + * @see org.tensorflow.op.MathOps.argMin + */ + public fun argMin( + input: Operand, + dimension: Operand, + outputType: Class + ): ArgMin = java.argMin( + input, + dimension, + outputType + ) + + /** + * Computes the trignometric inverse sine of x element-wise. + * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. + * + * **Note**: The output of `tf.math.asin` will lie within the invertible range + * of sine, i.e [-pi/2, pi/2]. + * + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.sin(x) # [0.8659266, 0.7068252] + * + * tf.math.asin(y) # [1.047, 0.785] = x + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Asin` output and operands + * @return a new instance of Asin + * @see org.tensorflow.op.MathOps.asin + */ + public fun asin(x: Operand): Asin = java.asin( + x + ) + + /** + * Computes inverse hyperbolic sine of x element-wise. + * Given an input tensor, this function computes inverse hyperbolic sine + * for every element in the tensor. Both input and output has a range of + * `[-inf, inf]`. + * ``` + * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Asinh` output and operands + * @return a new instance of Asinh + * @see org.tensorflow.op.MathOps.asinh + */ + public fun asinh(x: Operand): Asinh = java.asinh( + x + ) + + /** + * Computes the trignometric inverse tangent of x element-wise. 
+ * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that + * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. + * + * **Note**: The output of `tf.math.atan` will lie within the invertible range + * of tan, i.e (-pi/2, pi/2). + * + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.tan(x) # [1.731261, 0.99920404] + * + * tf.math.atan(y) # [1.047, 0.785] = x + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Atan` output and operands + * @return a new instance of Atan + * @see org.tensorflow.op.MathOps.atan + */ + public fun atan(x: Operand): Atan = java.atan( + x + ) + + /** + * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. + * This is the angle `\( \theta \in [-\pi, \pi] \)` such that + * \[ x = r \cos(\theta) \] + * and + * \[ y = r \sin(\theta) \] + * where `\(r = \sqrt{x^2 + y^2} \)`. + * + * For example: + * ``` + * + * x = [1., 1.] + * y = [1., -1.] + * print((tf.math.atan2(y,x) * (180 / np.pi)).numpy()) + * [ 45. -45.] + * ``` + * + * @param data type for `z` output + * @param y The y value + * @param x The x value + * @param data type for `Atan2` output and operands + * @return a new instance of Atan2 + * @see org.tensorflow.op.MathOps.atan2 + */ + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + y, + x + ) + + /** + * Computes inverse hyperbolic tangent of x element-wise. + * Given an input tensor, this function computes inverse hyperbolic tangent + * for every element in the tensor. Input range is `[-1,1]` and output range is + * `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + * input is `1`, output will be `inf`. Values outside the range will have + * `nan` as output. + * ``` + * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 
0.54930615 nan nan] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Atanh` output and operands + * @return a new instance of Atanh + * @see org.tensorflow.op.MathOps.atanh + */ + public fun atanh(x: Operand): Atanh = java.atanh( + x + ) + + /** + * Compute the regularized incomplete beta integral `\(I_x(a, b)\)`. + * The regularized incomplete beta integral is defined as: + * + * `\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)` + * + * where + * + * `\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)` + * + * is the incomplete beta function and `\(B(a, b)\)` is the _complete_ + * beta function. + * + * @param data type for `z` output + * @param a The a value + * @param b The b value + * @param x The x value + * @param data type for `Betainc` output and operands + * @return a new instance of Betainc + * @see org.tensorflow.op.MathOps.betainc + */ + public fun betainc( + a: Operand, + b: Operand, + x: Operand + ): Betainc = java.betainc( + a, + b, + x + ) + + /** + * Counts the number of occurrences of each value in an integer array. + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `bins` output + * @param arr int32 `Tensor`. + * @param sizeOutput non-negative int32 scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. 
+ * @param data type for `Bincount` output and operands + * @return a new instance of Bincount + * @see org.tensorflow.op.MathOps.bincount + */ + public fun bincount( + arr: Operand, + sizeOutput: Operand, + weights: Operand + ): Bincount = java.bincount( + arr, + sizeOutput, + weights + ) + + /** + * Returns element-wise smallest integer not less than x. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Ceil` output and operands + * @return a new instance of Ceil + * @see org.tensorflow.op.MathOps.ceil + */ + public fun ceil(x: Operand): Ceil = java.ceil( + x + ) + + /** + * Computes the complex absolute value of a tensor. + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. + * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` + * + * @param data type for `y` output + * @param x The x value + * @return a new instance of ComplexAbs, with default output types + * @see org.tensorflow.op.MathOps.complexAbs + */ + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + x + ) + + /** + * Computes the complex absolute value of a tensor. + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. 
+ * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param Tout The value of the Tout attribute + * @param data type for `ComplexAbs` output and operands + * @return a new instance of ComplexAbs + * @see org.tensorflow.op.MathOps.complexAbs + */ + public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = + java.complexAbs( + x, + Tout + ) + + /** + * Returns the complex conjugate of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * complex numbers that are the complex conjugate of each element in `input`. The + * complex numbers in `input` must be of the form `\(a + bj\)`, where _a_ is the + * real part and _b_ is the imaginary part. + * + * The complex conjugate returned by this operation is of the form `\(a - bj\)`. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param data type for `Conj` output and operands + * @return a new instance of Conj + * @see org.tensorflow.op.MathOps.conj + */ + public fun conj(input: Operand): Conj = java.conj( + input + ) + + /** + * Computes cos of x element-wise. + * Given an input tensor, this function computes cosine of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. If input lies outside the boundary, `nan` + * is returned. 
+ * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 + * nan] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Cos` output and operands + * @return a new instance of Cos + * @see org.tensorflow.op.MathOps.cos + */ + public fun cos(x: Operand): Cos = java.cos( + x + ) + + /** + * Computes hyperbolic cosine of x element-wise. + * Given an input tensor, this function computes hyperbolic cosine of every + * element in the tensor. Input range is `[-inf, inf]` and output range + * is `[1, inf]`. + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 + * 3.7621956e+00 1.1013233e+04 inf] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Cosh` output and operands + * @return a new instance of Cosh + * @see org.tensorflow.op.MathOps.cosh + */ + public fun cosh(x: Operand): Cosh = java.cosh( + x + ) + + /** + * Compute the cumulative product of the tensor `x` along `axis`. + * By default, this op performs an inclusive cumprod, which means that the first + * element of the input is identical to the first element of the output: + * ``` + * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + * + * ``` + * + * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + * performed instead: + * ``` + * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + * + * ``` + * + * By setting the `reverse` kwarg to `True`, the cumprod is performed in the + * opposite direction: + * ``` + * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + * + * ``` + * + * This is more efficient than using separate `tf.reverse` ops. 
+ * + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + * + * ``` + * + * @param data type for `out` output + * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. + * @param options carries optional attribute values + * @param data type for `Cumprod` output and operands + * @return a new instance of Cumprod + * @see org.tensorflow.op.MathOps.cumprod + * @param exclusive Sets the exclusive option. + * + * @param exclusive If `True`, perform exclusive cumprod. + * @return this Options instance. + * @param reverse Sets the reverse option. + * + * @param reverse A `bool` (default: False). + * @return this Options instance. + */ + public fun cumprod( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? = null + ): Cumprod = java.cumprod( + x, + axis, + *listOfNotNull( + exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } + ).toTypedArray() + ) + + /** + * Compute the cumulative sum of the tensor `x` along `axis`. 
+ * By default, this op performs an inclusive cumsum, which means that the first + * element of the input is identical to the first element of the output: + * ``` + * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + * + * ``` + * + * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + * performed instead: + * ``` + * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + * + * ``` + * + * By setting the `reverse` kwarg to `True`, the cumsum is performed in the + * opposite direction: + * ``` + * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + * + * ``` + * + * This is more efficient than using separate `tf.reverse` ops. + * + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + * + * ``` + * + * @param data type for `out` output + * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. + * @param options carries optional attribute values + * @param data type for `Cumsum` output and operands + * @return a new instance of Cumsum + * @see org.tensorflow.op.MathOps.cumsum + * @param exclusive Sets the exclusive option. + * + * @param exclusive If `True`, perform exclusive cumsum. + * @return this Options instance. + * @param reverse Sets the reverse option. + * + * @param reverse A `bool` (default: False). + * @return this Options instance. + */ + public fun cumsum( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? 
= null + ): Cumsum = java.cumsum( + x, + axis, + *listOfNotNull( + exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } + ).toTypedArray() + ) + + /** + * Counts the number of occurrences of each value in an integer array. + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `output` output + * @param input 1D or 2D int `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. + * @param options carries optional attribute values + * @param data type for `DenseBincount` output and operands + * @param data type for `DenseBincount` output and operands + * @return a new instance of DenseBincount + * @see org.tensorflow.op.MathOps.denseBincount + * @param binaryOutput Sets the binaryOutput option. + * + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + * @return this Options instance. + */ + public fun denseBincount( + input: Operand, + sizeOutput: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): DenseBincount = java.denseBincount( + input, + sizeOutput, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + /** + * Computes Psi, the derivative of Lgamma (the log of the absolute value of + * `Gamma(x)`), element-wise. 
+ * + * @param data type for `y` output + * @param x The x value + * @param data type for `Digamma` output and operands + * @return a new instance of Digamma + * @see org.tensorflow.op.MathOps.digamma + */ + public fun digamma(x: Operand): Digamma = java.digamma( + x + ) + + /** + * Returns x / y element-wise. + * _NOTE_: `math.Div` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Div` output and operands + * @return a new instance of Div + * @see org.tensorflow.op.MathOps.div + */ + public fun div(x: Operand, y: Operand): Div = java.div( + x, + y + ) + + /** + * Returns 0 if the denominator is zero. + * _NOTE_: `math.DivNoNan` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `DivNoNan` output and operands + * @return a new instance of DivNoNan + * @see org.tensorflow.op.MathOps.divNoNan + */ + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + x, + y + ) + + /** + * Returns the truth value of (x == y) element-wise. + * _NOTE_: `math.Equal` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * ``` + * x = tf.constant([2, 4]) + * y = tf.constant(2) + * tf.math.equal(x, y) ==> array([True, False]) + * + * x = tf.constant([2, 4]) + * y = tf.constant([2, 4]) + * tf.math.equal(x, y) ==> array([True, True]) + * + * ``` + * + * @param x The x value + * @param y The y value + * @param options carries optional attribute values + * @param data type for `Equal` output and operands + * @return a new instance of Equal + * @see org.tensorflow.op.MathOps.equal + * @param incompatibleShapeError Sets the incompatibleShapeError option. + * + * @param incompatibleShapeError the incompatibleShapeError option + * @return this Options instance. + */ + public fun equal( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? = null + ): Equal = java.equal( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + ).toTypedArray() + ) + + /** + * Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` + * element-wise. In statistics, for non-negative values of $x$, the error function has the + * following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and + * variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, + * x]$. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Erf` output and operands + * @return a new instance of Erf + * @see org.tensorflow.op.MathOps.erf + */ + public fun erf(x: Operand): Erf = java.erf( + x + ) + + /** + * Computes the complementary error function of `x` element-wise. 
+ * + * @param data type for `y` output + * @param x The x value + * @param data type for `Erfc` output and operands + * @return a new instance of Erfc + * @see org.tensorflow.op.MathOps.erfc + */ + public fun erfc(x: Operand): Erfc = java.erfc( + x + ) + + /** + * The Erfinv operation + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Erfinv` output and operands + * @return a new instance of erfinv + * @see org.tensorflow.op.MathOps.erfinv + */ + public fun erfinv(x: Operand): erfinv = java.erfinv( + x + ) + + /** + * Computes exponential of x element-wise. `\(y = e^x\)`. + * This function computes the exponential of every element in the input tensor. + * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. + * Output is positive for any real input. + * ``` + * x = tf.constant(2.0) + * tf.math.exp(x) ==> 7.389056 + * + * x = tf.constant([2.0, 8.0]) + * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + * + * ``` + * + * For complex numbers, the exponential value is calculated as follows: + * ``` + * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + * + * ``` + * + * Let's consider complex number 1+1j as an example. + * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + * ``` + * x = tf.constant(1 + 1j) + * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Exp` output and operands + * @return a new instance of Exp + * @see org.tensorflow.op.MathOps.exp + */ + public fun exp(x: Operand): Exp = java.exp( + x + ) + + /** + * Computes `exp(x) - 1` element-wise. + * i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. 
+ * ``` + * x = tf.constant(2.0) + * tf.math.expm1(x) ==> 6.389056 + * + * x = tf.constant([2.0, 8.0]) + * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + * + * x = tf.constant(1 + 1j) + * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Expm1` output and operands + * @return a new instance of Expm1 + * @see org.tensorflow.op.MathOps.expm1 + */ + public fun expm1(x: Operand): Expm1 = java.expm1( + x + ) + + /** + * Output a fact about factorials. + * + * @return a new instance of Fact + * @see org.tensorflow.op.MathOps.fact + */ + public fun fact(): Fact = java.fact( + + ) + + /** + * Returns element-wise largest integer not greater than x. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Floor` output and operands + * @return a new instance of Floor + * @see org.tensorflow.op.MathOps.floor + */ + public fun floor(x: Operand): Floor = java.floor( + x + ) + + /** + * Returns x // y element-wise. + * _NOTE_: `math.FloorDiv` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `FloorDiv` output and operands + * @return a new instance of FloorDiv + * @see org.tensorflow.op.MathOps.floorDiv + */ + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + x, + y + ) + + /** + * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is + * true, this follows Python semantics in that the result here is consistent + * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. + * + * _NOTE_: `math.FloorMod` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `FloorMod` output and operands + * @return a new instance of FloorMod + * @see org.tensorflow.op.MathOps.floorMod + */ + public fun floorMod(x: Operand, y: Operand): FloorMod = + java.floorMod( + x, + y + ) + + /** + * Returns the truth value of (x > y) element-wise. + * _NOTE_: `math.Greater` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 2, 5]) + * tf.math.greater(x, y) ==> [False, True, True] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.greater(x, y) ==> [False, False, True] + * + * ``` + * + * @param x The x value + * @param y The y value + * @param data type for `Greater` output and operands + * @return a new instance of Greater + * @see org.tensorflow.op.MathOps.greater + */ + public fun greater(x: Operand, y: Operand): Greater = java.greater( + x, + y + ) + + /** + * Returns the truth value of (x >= y) element-wise. + * _NOTE_: `math.GreaterEqual` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5, 2, 5, 10]) + * tf.math.greater_equal(x, y) ==> [True, True, True, False] + * + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5]) + * tf.math.greater_equal(x, y) ==> [True, False, True, True] + * + * ``` + * + * @param x The x value + * @param y The y value + * @param data type for `GreaterEqual` output and operands + * @return a new instance of GreaterEqual + * @see org.tensorflow.op.MathOps.greaterEqual + */ + public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = + java.greaterEqual( + x, + y + ) + + /** + * Compute the lower regularized incomplete Gamma function `P(a, x)`. + * The lower regularized incomplete Gamma function is defined as: + * + * `\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\)` + * + * where + * + * `\(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\)` + * + * is the lower incomplete Gamma function. + * + * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + * Gamma function. + * + * @param data type for `z` output + * @param a The a value + * @param x The x value + * @param data type for `Igamma` output and operands + * @return a new instance of Igamma + * @see org.tensorflow.op.MathOps.igamma + */ + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + a, + x + ) + + /** + * Compute the upper regularized incomplete Gamma function `Q(a, x)`. + * The upper regularized incomplete Gamma function is defined as: + * + * `\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)` + * + * where + * + * `\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\)` + * + * is the upper incomplete Gamma function. + * + * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + * Gamma function. 
+ * + * @param data type for `z` output + * @param a The a value + * @param x The x value + * @param data type for `Igammac` output and operands + * @return a new instance of Igammac + * @see org.tensorflow.op.MathOps.igammac + */ + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + a, + x + ) + + /** + * Returns the imaginary part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of Imag, with default output types + * @see org.tensorflow.op.MathOps.imag + */ + public fun imag(input: Operand): Imag = java.imag( + input + ) + + /** + * Returns the imaginary part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Imag` output and operands + * @return a new instance of Imag + * @see org.tensorflow.op.MathOps.imag + */ + public fun imag(input: Operand, Tout: Class): Imag = + java.imag( + input, + Tout + ) + + /** + * Computes the inverse permutation of a tensor. 
+ * This operation computes the inverse of an index permutation. It takes a 1-D + * integer tensor `x`, which represents the indices of a zero-based array, and + * swaps each value with its index position. In other words, for an output tensor + * `y` and an input tensor `x`, this operation computes the following: + * + * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` + * + * The values must include 0. There can be no duplicate values or negative values. + * + * For example: + * ``` + * # tensor `x` is [3, 4, 0, 2, 1] + * invert_permutation(x) ==> [2, 4, 3, 0, 1] + * + * ``` + * + * @param data type for `y` output + * @param x 1-D. + * @param data type for `InvertPermutation` output and operands + * @return a new instance of InvertPermutation + * @see org.tensorflow.op.MathOps.invertPermutation + */ + public fun invertPermutation(x: Operand): InvertPermutation = + java.invertPermutation( + x + ) + + /** + * Returns which elements of x are finite. + * `@`compatibility(numpy) + * + * Equivalent to np.isfinite + * + * `@`end_compatibility + * + * Example: + * ``` + * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + * tf.math.is_finite(x) ==> [True, True, True, False, False] + * + * ``` + * + * @param x The x value + * @return a new instance of IsFinite + * @see org.tensorflow.op.MathOps.isFinite + */ + public fun isFinite(x: Operand): IsFinite = java.isFinite( + x + ) + + /** + * Returns which elements of x are Inf. + * `@`compatibility(numpy) + * + * Equivalent to np.isinf + * + * `@`end_compatibility + * + * Example: + * ``` + * x = tf.constant([5.0, np.inf, 6.8, np.inf]) + * tf.math.is_inf(x) ==> [False, True, False, True] + * + * ``` + * + * @param x The x value + * @return a new instance of IsInf + * @see org.tensorflow.op.MathOps.isInf + */ + public fun isInf(x: Operand): IsInf = java.isInf( + x + ) + + /** + * Returns which elements of x are NaN. 
+ * `@`compatibility(numpy) + * + * Equivalent to np.isnan + * + * `@`end_compatibility + * + * Example: + * ``` + * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + * tf.math.is_nan(x) ==> [False, True, False, True, False] + * + * ``` + * + * @param x The x value + * @return a new instance of IsNan + * @see org.tensorflow.op.MathOps.isNan + */ + public fun isNan(x: Operand): IsNan = java.isNan( + x + ) + + /** + * Returns the truth value of (x < y) element-wise. + * _NOTE_: `math.Less` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less(x, y) ==> [False, True, False] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 7]) + * tf.math.less(x, y) ==> [False, True, True] + * + * ``` + * + * @param x The x value + * @param y The y value + * @param data type for `Less` output and operands + * @return a new instance of Less + * @see org.tensorflow.op.MathOps.less + */ + public fun less(x: Operand, y: Operand): Less = java.less( + x, + y + ) + + /** + * Returns the truth value of (x <= y) element-wise. + * _NOTE_: `math.LessEqual` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less_equal(x, y) ==> [True, True, False] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 6]) + * tf.math.less_equal(x, y) ==> [True, True, True] + * + * ``` + * + * @param x The x value + * @param y The y value + * @param data type for `LessEqual` output and operands + * @return a new instance of LessEqual + * @see org.tensorflow.op.MathOps.lessEqual + */ + public fun lessEqual(x: Operand, y: Operand): LessEqual = + java.lessEqual( + x, + y + ) + + /** + * Computes the log of the absolute value of `Gamma(x)` element-wise. 
+ * For positive numbers, this function computes log((input - 1)!) for every element in the + * tensor. + * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` + * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Lgamma` output and operands + * @return a new instance of Lgamma + * @see org.tensorflow.op.MathOps.lgamma + */ + public fun lgamma(x: Operand): Lgamma = java.lgamma( + x + ) + + /** + * Computes natural logarithm of x element-wise. + * I.e., `\(y = \log_e x\)`. + * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Log` output and operands + * @return a new instance of Log + * @see org.tensorflow.op.MathOps.log + */ + public fun log(x: Operand): Log = java.log( + x + ) + + /** + * Computes natural logarithm of (1 + x) element-wise. + * I.e., `\(y = \log_e (1 + x)\)`. + * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Log1p` output and operands + * @return a new instance of Log1p + * @see org.tensorflow.op.MathOps.log1p + */ + public fun log1p(x: Operand): Log1p = java.log1p( + x + ) + + /** + * Returns the truth value of x AND y element-wise. + * _NOTE_: `math.LogicalAnd` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x The x value + * @param y The y value + * @return a new instance of LogicalAnd + * @see org.tensorflow.op.MathOps.logicalAnd + */ + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + x, + y + ) + + /** + * Returns the truth value of `NOT x` element-wise. + * + * @param x A `Tensor` of type `bool`. + * @return a new instance of LogicalNot + * @see org.tensorflow.op.MathOps.logicalNot + */ + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + x + ) + + /** + * Returns the truth value of x OR y element-wise. + * _NOTE_: `math.LogicalOr` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x The x value + * @param y The y value + * @return a new instance of LogicalOr + * @see org.tensorflow.op.MathOps.logicalOr + */ + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + x, + y + ) + + /** + * Returns the max of x and y (i.e. x > y ? x : y) element-wise. + * _NOTE_: `math.Maximum` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Maximum` output and operands + * @return a new instance of Maximum + * @see org.tensorflow.op.MathOps.maximum + */ + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + x, + y + ) + + /** + * Computes the mean of elements across dimensions of a tensor. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param data type for `output` output + * @param input The tensor to reduce. 
+ * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attribute values + * @param data type for `Mean` output and operands + * @return a new instance of Mean + * @see org.tensorflow.op.MathOps.mean + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun mean( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Mean = java.mean( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } + ).toTypedArray() + ) + + /** + * Returns the min of x and y (i.e. x < y ? x : y) element-wise. + * _NOTE_: `math.Minimum` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Minimum` output and operands + * @return a new instance of Minimum + * @see org.tensorflow.op.MathOps.minimum + */ + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + x, + y + ) + + /** + * Returns element-wise remainder of division. This emulates C semantics in that + * the result here is consistent with a truncating divide. E.g. + * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + * + * _NOTE_: `math.Mod` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Mod` output and operands + * @return a new instance of Mod + * @see org.tensorflow.op.MathOps.mod + */ + public fun mod(x: Operand, y: Operand): Mod = java.mod( + x, + y + ) + + /** + * Returns x * y element-wise. + * _NOTE_: `math.Mul` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Mul` output and operands + * @return a new instance of Mul + * @see org.tensorflow.op.MathOps.mul + */ + public fun mul(x: Operand, y: Operand): Mul = java.mul( + x, + y + ) + + /** + * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + * _NOTE_: `math.MulNoNan` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `MulNoNan` output and operands + * @return a new instance of MulNoNan + * @see org.tensorflow.op.MathOps.mulNoNan + */ + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + x, + y + ) + + /** + * The Ndtri operation + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Ndtri` output and operands + * @return a new instance of Ndtri + * @see org.tensorflow.op.MathOps.ndtri + */ + public fun ndtri(x: Operand): Ndtri = java.ndtri( + x + ) + + /** + * Computes numerical negative value element-wise. + * I.e., `\(y = -x\)`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Neg` output and operands + * @return a new instance of Neg + * @see org.tensorflow.op.MathOps.neg + */ + public fun neg(x: Operand): Neg = java.neg( + x + ) + + /** + * Returns the next representable value of `x1` in the direction of `x2`, element-wise. + * This operation returns the same result as the C++ std::nextafter function. + * + * It can also return a subnormal number. + * + * `@`compatibility(cpp) + * + * Equivalent to C++ std::nextafter function. 
+ * + * `@`end_compatibility + * + * @param data type for `output` output + * @param x1 The x1 value + * @param x2 The x2 value + * @param data type for `NextAfter` output and operands + * @return a new instance of NextAfter + * @see org.tensorflow.op.MathOps.nextAfter + */ + public fun nextAfter(x1: Operand, x2: Operand): NextAfter = + java.nextAfter( + x1, + x2 + ) + + /** + * Returns the truth value of (x != y) element-wise. + * _NOTE_: `math.NotEqual` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x The x value + * @param y The y value + * @param options carries optional attribute values + * @param data type for `NotEqual` output and operands + * @return a new instance of NotEqual + * @see org.tensorflow.op.MathOps.notEqual + * @param incompatibleShapeError Sets the incompatibleShapeError option. + * + * @param incompatibleShapeError the incompatibleShapeError option + * @return this Options instance. + */ + public fun notEqual( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? = null + ): NotEqual = java.notEqual( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + ).toTypedArray() + ) + + /** + * Compute the polygamma function `\(\psi^{(n)}(x)\)`. + * The polygamma function is defined as: + * + * `\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\)` + * + * where `\(\psi(x)\)` is the digamma function. + * The polygamma function is defined only for non-negative integer orders \a\. + * + * @param data type for `z` output + * @param a The a value + * @param x The x value + * @param data type for `Polygamma` output and operands + * @return a new instance of Polygamma + * @see org.tensorflow.op.MathOps.polygamma + */ + public fun polygamma(a: Operand, x: Operand): Polygamma = + java.polygamma( + a, + x + ) + + /** + * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). 
+ * For each entry in `x`, calculates the number of `1` (on) bits in the binary + * representation of that entry. + * + * **NOTE**: It is more efficient to first `tf.bitcast` your tensors into + * `int32` or `int64` and perform the bitcount on the result, than to feed in + * 8- or 16-bit inputs and then aggregate the resulting counts. + * + * @param x The x value + * @return a new instance of PopulationCount + * @see org.tensorflow.op.MathOps.populationCount + */ + public fun populationCount(x: Operand): PopulationCount = java.populationCount( + x + ) + + /** + * Computes the power of one value to another. + * Given a tensor `x` and a tensor `y`, this operation computes `\(x^y\)` for + * corresponding elements in `x` and `y`. For example: + * ``` + * # tensor 'x' is [[2, 2]], [3, 3]] + * # tensor 'y' is [[8, 16], [2, 3]] + * tf.pow(x, y) ==> [[256, 65536], [9, 27]] + * + * ``` + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Pow` output and operands + * @return a new instance of Pow + * @see org.tensorflow.op.MathOps.pow + */ + public fun pow(x: Operand, y: Operand): Pow = java.pow( + x, + y + ) + + /** + * Returns x + y element-wise, working on quantized buffers. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. 
+ * @param Toutput The value of the Toutput attribute + * @param data type for `QuantizedAdd` output and operands + * @return a new instance of QuantizedAdd + * @see org.tensorflow.op.MathOps.quantizedAdd + */ + public fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: Class + ): QuantizedAdd = java.quantizedAdd( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + /** + * Returns x * y element-wise, working on quantized buffers. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput The value of the Toutput attribute + * @param data type for `QuantizedMul` output and operands + * @return a new instance of QuantizedMul + * @see org.tensorflow.op.MathOps.quantizedMul + */ + public fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: Class + ): QuantizedMul = java.quantizedMul( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + /** + * Returns the real part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. 
+ * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @return a new instance of Real, with default output types + * @see org.tensorflow.op.MathOps.real + */ + public fun real(input: Operand): Real = java.real( + input + ) + + /** + * Returns the real part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Real` output and operands + * @return a new instance of Real + * @see org.tensorflow.op.MathOps.real + */ + public fun real(input: Operand, Tout: Class): Real = + java.real( + input, + Tout + ) + + /** + * Returns x / y element-wise for real types. + * If `x` and `y` are reals, this will return the floating-point division. + * + * _NOTE_: `Div` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `RealDiv` output and operands + * @return a new instance of RealDiv + * @see org.tensorflow.op.MathOps.realDiv + */ + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + x, + y + ) + + /** + * Computes the reciprocal of x element-wise. + * I.e., `\(y = 1 / x\)`. 
+ * + * @param data type for `y` output + * @param x The x value + * @param data type for `Reciprocal` output and operands + * @return a new instance of Reciprocal + * @see org.tensorflow.op.MathOps.reciprocal + */ + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + x + ) + + /** + * Returns element-wise integer closest to x. + * If the result is midway between two representable values, + * the even representable is chosen. + * For example: + * ``` + * rint(-1.5) ==> -2.0 + * rint(0.5000001) ==> 1.0 + * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Rint` output and operands + * @return a new instance of Rint + * @see org.tensorflow.op.MathOps.rint + */ + public fun rint(x: Operand): Rint = java.rint( + x + ) + + /** + * Rounds the values of a tensor to the nearest integer, element-wise. + * Rounds half to even. Also known as bankers rounding. If you want to round + * according to the current system rounding mode use std::cint. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Round` output and operands + * @return a new instance of Round + * @see org.tensorflow.op.MathOps.round + */ + public fun round(x: Operand): Round = java.round( + x + ) + + /** + * Computes reciprocal of square root of x element-wise. + * I.e., `\(y = 1 / \sqrt{x}\)`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Rsqrt` output and operands + * @return a new instance of Rsqrt + * @see org.tensorflow.op.MathOps.rsqrt + */ + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + x + ) + + /** + * Computes the maximum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. 
+ * + * Computes a tensor such that + * `\(output_i = \max_j(data_j)\)` where `max` is over `j` such + * that `segment_ids[j] == i`. + * + * If the max is empty for a given segment ID `i`, `output[i] = 0`. + *
+ * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ * <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
+ * </div>
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_max(c, tf.constant([0, 0, 1])) + * # ==> [[4, 3, 3, 4], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @param data type for `SegmentMax` output and operands + * @return a new instance of SegmentMax + * @see org.tensorflow.op.MathOps.segmentMax + */ + public fun segmentMax(`data`: Operand, segmentIds: Operand): + SegmentMax = java.segmentMax( + data, + segmentIds + ) + + /** + * Computes the mean along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * `\(output_i = \frac{\sum_j data_j}{N}\)` where `mean` is + * over `j` such that `segment_ids[j] == i` and `N` is the total number of + * values summed. + * + * If the mean is empty for a given segment ID `i`, `output[i] = 0`. + *
+ * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ * <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
+ * </div>
+ * + * For example: + * ``` + * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_mean(c, tf.constant([0, 0, 1])) + * # ==> [[2.5, 2.5, 2.5, 2.5], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @param data type for `SegmentMean` output and operands + * @return a new instance of SegmentMean + * @see org.tensorflow.op.MathOps.segmentMean + */ + public fun segmentMean(`data`: Operand, segmentIds: Operand): + SegmentMean = java.segmentMean( + data, + segmentIds + ) + + /** + * Computes the minimum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * `\(output_i = \min_j(data_j)\)` where `min` is over `j` such + * that `segment_ids[j] == i`. + * + * If the min is empty for a given segment ID `i`, `output[i] = 0`. + *
+ * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ * <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
+ * </div>
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_min(c, tf.constant([0, 0, 1])) + * # ==> [[1, 2, 2, 1], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @param data type for `SegmentMin` output and operands + * @return a new instance of SegmentMin + * @see org.tensorflow.op.MathOps.segmentMin + */ + public fun segmentMin(`data`: Operand, segmentIds: Operand): + SegmentMin = java.segmentMin( + data, + segmentIds + ) + + /** + * Computes the product along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * `\(output_i = \prod_j data_j\)` where the product is over `j` such + * that `segment_ids[j] == i`. + * + * If the product is empty for a given segment ID `i`, `output[i] = 1`. + *
+ * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ * <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
+ * </div>
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_prod(c, tf.constant([0, 0, 1])) + * # ==> [[4, 6, 6, 4], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @param data type for `SegmentProd` output and operands + * @return a new instance of SegmentProd + * @see org.tensorflow.op.MathOps.segmentProd + */ + public fun segmentProd(`data`: Operand, segmentIds: Operand): + SegmentProd = java.segmentProd( + data, + segmentIds + ) + + /** + * Computes the sum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * `\(output_i = \sum_j data_j\)` where sum is over `j` such + * that `segment_ids[j] == i`. + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. + *
+ * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ * <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
+ * </div>
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * # ==> [[5, 5, 5, 5], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @param data type for `SegmentSum` output and operands + * @return a new instance of SegmentSum + * @see org.tensorflow.op.MathOps.segmentSum + */ + public fun segmentSum(`data`: Operand, segmentIds: Operand): + SegmentSum = java.segmentSum( + data, + segmentIds + ) + + /** + * Computes sigmoid of `x` element-wise. + * Specifically, `y = 1 / (1 + exp(-x))`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Sigmoid` output and operands + * @return a new instance of Sigmoid + * @see org.tensorflow.op.MathOps.sigmoid + */ + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + x + ) + + /** + * Returns an element-wise indication of the sign of a number. + * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + * + * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + * + * Example usage: + * ``` + * + * tf.math.sign([0., 2., -3.]) + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Sign` output and operands + * @return a new instance of Sign + * @see org.tensorflow.op.MathOps.sign + */ + public fun sign(x: Operand): Sign = java.sign( + x + ) + + /** + * Computes sine of x element-wise. + * Given an input tensor, this function computes sine of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. 
+ * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 + * nan] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Sin` output and operands + * @return a new instance of Sin + * @see org.tensorflow.op.MathOps.sin + */ + public fun sin(x: Operand): Sin = java.sin( + x + ) + + /** + * Computes hyperbolic sine of x element-wise. + * Given an input tensor, this function computes hyperbolic sine of every + * element in the tensor. Input range is `[-inf,inf]` and output range + * is `[-inf,inf]`. + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 + * 3.6268604e+00 1.1013232e+04 inf] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Sinh` output and operands + * @return a new instance of Sinh + * @see org.tensorflow.op.MathOps.sinh + */ + public fun sinh(x: Operand): Sinh = java.sinh( + x + ) + + /** + * The Softplus operation + * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Softplus` output and operands + * @return a new instance of Softplus + * @see org.tensorflow.op.MathOps.softplus + */ + public fun softplus(features: Operand): Softplus = java.softplus( + features + ) + + /** + * Computes square root of x element-wise. + * I.e., `\(y = \sqrt{x} = x^{1/2}\)`. + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Sqrt` output and operands + * @return a new instance of Sqrt + * @see org.tensorflow.op.MathOps.sqrt + */ + public fun sqrt(x: Operand): Sqrt = java.sqrt( + x + ) + + /** + * Computes square of x element-wise. + * I.e., `\(y = x * x = x^2\)`. 
+ * + * @param data type for `y` output + * @param x The x value + * @param data type for `Square` output and operands + * @return a new instance of Square + * @see org.tensorflow.op.MathOps.square + */ + public fun square(x: Operand): Square = java.square( + x + ) + + /** + * Returns conj(x - y)(x - y) element-wise. + * _NOTE_: `math.SquaredDifference` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `SquaredDifference` output and operands + * @return a new instance of SquaredDifference + * @see org.tensorflow.op.MathOps.squaredDifference + */ + public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = + java.squaredDifference( + x, + y + ) + + /** + * Returns x - y element-wise. + * _NOTE_: `math.Sub` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Sub` output and operands + * @return a new instance of Sub + * @see org.tensorflow.op.MathOps.sub + */ + public fun sub(x: Operand, y: Operand): Sub = java.sub( + x, + y + ) + + /** + * Computes tan of x element-wise. + * Given an input tensor, this function computes tangent of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `(-inf, inf)`. If input lies outside the boundary, `nan` + * is returned. 
+ * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Tan` output and operands + * @return a new instance of Tan + * @see org.tensorflow.op.MathOps.tan + */ + public fun tan(x: Operand): Tan = java.tan( + x + ) + + /** + * Computes hyperbolic tangent of `x` element-wise. + * Given an input tensor, this function computes hyperbolic tangent of every + * element in the tensor. Input range is `[-inf, inf]` and + * output range is `[-1,1]`. + * ``` + * + * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + * tf.math.tanh(x) + * + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param data type for `Tanh` output and operands + * @return a new instance of Tanh + * @see org.tensorflow.op.MathOps.tanh + */ + public fun tanh(x: Operand): Tanh = java.tanh( + x + ) + + /** + * Returns x / y element-wise for integer types. + * Truncation designates that negative numbers will round fractional quantities + * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + * than Python semantics. See `FloorDiv` for a division function that matches + * Python Semantics. + * + * _NOTE_: `math.TruncateDiv` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `TruncateDiv` output and operands + * @return a new instance of TruncateDiv + * @see org.tensorflow.op.MathOps.truncateDiv + */ + public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = + java.truncateDiv( + x, + y + ) + + /** + * Returns element-wise remainder of division. 
This emulates C semantics in that + * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * y + + * truncate_mod(x, y) = x`. + * + * _NOTE_: `math.TruncateMod` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `TruncateMod` output and operands + * @return a new instance of TruncateMod + * @see org.tensorflow.op.MathOps.truncateMod + */ + public fun truncateMod(x: Operand, y: Operand): TruncateMod = + java.truncateMod( + x, + y + ) + + /** + * Computes the maximum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . + * Instead of computing the sum over segments, it computes the maximum such that: + * + * `\(output_i = \max_{j...} data[j...]\)` where max is over tuples `j...` such + * that `segment_ids[j...] == i`. + * + * If the maximum is empty for a given segment ID `i`, it outputs the smallest + * possible value for the specific numeric type, + * `output[i] = numeric_limits::lowest()`. + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + *
+ * + *
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 3, 3, 4], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. + * @param numSegments The numSegments value + * @param data type for `UnsortedSegmentMax` output and operands + * @return a new instance of UnsortedSegmentMax + * @see org.tensorflow.op.MathOps.unsortedSegmentMax + */ + public fun unsortedSegmentMax( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMax = java.unsortedSegmentMax( + data, + segmentIds, + numSegments + ) + + /** + * Computes the minimum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . + * Instead of computing the sum over segments, it computes the minimum such that: + * + * `\(output_i = \min_{j...} data_[j...]\)` where min is over tuples `j...` such + * that `segment_ids[j...] == i`. + * + * If the minimum is empty for a given segment ID `i`, it outputs the largest + * possible value for the specific numeric type, + * `output[i] = numeric_limits::max()`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 1, 2, 2, 1], + * # [5, 6, 7, 8]] + * + * ``` + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
+ * @param numSegments The numSegments value + * @param data type for `UnsortedSegmentMin` output and operands + * @return a new instance of UnsortedSegmentMin + * @see org.tensorflow.op.MathOps.unsortedSegmentMin + */ + public fun unsortedSegmentMin( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMin = java.unsortedSegmentMin( + data, + segmentIds, + numSegments + ) + + /** + * Computes the product along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . + * Instead of computing the sum over segments, it computes the product of all + * entries belonging to a segment such that: + * + * `\(output_i = \prod_{j...} data[j...]\)` where the product is over tuples + * `j...` such that `segment_ids[j...] == i`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 6, 6, 4], + * # [5, 6, 7, 8]] + * + * ``` + * + * If there is no entry for a given segment ID `i`, it outputs 1. + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
+ * @param numSegments The numSegments value + * @param data type for `UnsortedSegmentProd` output and operands + * @return a new instance of UnsortedSegmentProd + * @see org.tensorflow.op.MathOps.unsortedSegmentProd + */ + public fun unsortedSegmentProd( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentProd = java.unsortedSegmentProd( + data, + segmentIds, + numSegments + ) + + /** + * Computes the sum along segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * `\(output[i] = \sum_{j...} data[j...]\)` where the sum is over tuples `j...` such + * that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + * need not be sorted and need not cover all values in the full + * range of valid values. + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. + * If the given segment ID `i` is negative, the value is dropped and will not be + * added to the sum of the segment. + * + * `num_segments` should equal the number of distinct segment IDs. + *
+ * + *
+ * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 5, 5, 5, 5], + * # [5, 6, 7, 8]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. + * @param numSegments The numSegments value + * @param data type for `UnsortedSegmentSum` output and operands + * @return a new instance of UnsortedSegmentSum + * @see org.tensorflow.op.MathOps.unsortedSegmentSum + */ + public fun unsortedSegmentSum( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentSum = java.unsortedSegmentSum( + data, + segmentIds, + numSegments + ) + + /** + * Returns 0 if x == 0, and x / y otherwise, elementwise. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Xdivy` output and operands + * @return a new instance of Xdivy + * @see org.tensorflow.op.MathOps.xdivy + */ + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + x, + y + ) + + /** + * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Xlog1py` output and operands + * @return a new instance of Xlog1py + * @see org.tensorflow.op.MathOps.xlog1py + */ + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + x, + y + ) + + /** + * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param data type for `Xlogy` output and operands + * @return a new instance of Xlogy + * @see org.tensorflow.op.MathOps.xlogy + */ + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + x, + y + ) + + /** + * Compute the Hurwitz zeta function `\(\zeta(x, q)\)`. 
+ * The Hurwitz zeta function is defined as: + * + * `\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)` + * + * @param data type for `z` output + * @param x The x value + * @param q The q value + * @param data type for `Zeta` output and operands + * @return a new instance of Zeta + * @see org.tensorflow.op.MathOps.zeta + */ + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + x, + q + ) + + /** + * Returns the argument of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. + * + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * + * ``` + * + * `@`compatibility(numpy) + * + * Equivalent to np.angle. + * + * `@`end_compatibility + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Angle` output and operands + * @return a new instance of Angle + * @see org.tensorflow.op.MathOps.angle + */ + @JvmName("angleReified") + public inline fun angleTyped(input: Operand): Angle = + angle(input, U::class.java) + + /** + * Returns the index with the largest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. 
+ * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @param outputType The value of the outputType attribute + * @param data type for `ArgMax` output and operands + * @return a new instance of ArgMax + * @see org.tensorflow.op.MathOps.argMax + */ + @JvmName("argMaxReified") + public inline fun argMaxTyped(input: Operand, + dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) + + /** + * Returns the index with the smallest value across dimensions of a tensor. + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @param outputType The value of the outputType attribute + * @param data type for `ArgMin` output and operands + * @return a new instance of ArgMin + * @see org.tensorflow.op.MathOps.argMin + */ + @JvmName("argMinReified") + public inline fun argMinTyped(input: Operand, + dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) + + /** + * Computes the complex absolute value of a tensor. 
+ * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. + * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` + * + * @param data type for `y` output + * @param x The x value + * @param Tout The value of the Tout attribute + * @param data type for `ComplexAbs` output and operands + * @return a new instance of ComplexAbs + * @see org.tensorflow.op.MathOps.complexAbs + */ + @JvmName("complexAbsReified") + public inline fun complexAbsTyped(x: Operand): ComplexAbs = + complexAbs(x, U::class.java) + + /** + * Returns the imaginary part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Imag` output and operands + * @return a new instance of Imag + * @see org.tensorflow.op.MathOps.imag + */ + @JvmName("imagReified") + public inline fun imagTyped(input: Operand): Imag = + imag(input, U::class.java) + + /** + * Returns x + y element-wise, working on quantized buffers. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param minX The float value that the lowest quantized `x` value represents. 
+ * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput The value of the Toutput attribute + * @param data type for `QuantizedAdd` output and operands + * @return a new instance of QuantizedAdd + * @see org.tensorflow.op.MathOps.quantizedAdd + */ + @JvmName("quantizedAddReified") + public inline fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand + ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) + + /** + * Returns x * y element-wise, working on quantized buffers. + * + * @param data type for `z` output + * @param x The x value + * @param y The y value + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput The value of the Toutput attribute + * @param data type for `QuantizedMul` output and operands + * @return a new instance of QuantizedMul + * @see org.tensorflow.op.MathOps.quantizedMul + */ + @JvmName("quantizedMulReified") + public inline fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand + ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) + + /** + * Returns the real part of a complex number. + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. 
+ * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param Tout The value of the Tout attribute + * @param data type for `Real` output and operands + * @return a new instance of Real + * @see org.tensorflow.op.MathOps.real + */ + @JvmName("realReified") + public inline fun realTyped(input: Operand): Real = + real(input, U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt new file mode 100644 index 00000000000..1bb89d4bba1 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -0,0 +1,4049 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.nn.AvgPool +import org.tensorflow.op.nn.AvgPool3d +import org.tensorflow.op.nn.AvgPool3dGrad +import org.tensorflow.op.nn.BatchNormWithGlobalNormalization +import org.tensorflow.op.nn.BatchNormWithGlobalNormalizationGrad +import org.tensorflow.op.nn.BiasAdd +import org.tensorflow.op.nn.BiasAddGrad +import org.tensorflow.op.nn.ComputeAccidentalHits +import org.tensorflow.op.nn.Conv2d +import org.tensorflow.op.nn.Conv2dBackpropFilter +import org.tensorflow.op.nn.Conv2dBackpropInput +import org.tensorflow.op.nn.Conv3d +import org.tensorflow.op.nn.Conv3dBackpropFilter +import org.tensorflow.op.nn.Conv3dBackpropInput +import org.tensorflow.op.nn.CtcBeamSearchDecoder +import org.tensorflow.op.nn.CtcGreedyDecoder +import org.tensorflow.op.nn.CtcLoss +import org.tensorflow.op.nn.CudnnRNNCanonicalToParams +import org.tensorflow.op.nn.CudnnRNNParamsToCanonical +import org.tensorflow.op.nn.CudnnRnnParamsSize +import org.tensorflow.op.nn.DataFormatDimMap +import org.tensorflow.op.nn.DataFormatVecPermute +import org.tensorflow.op.nn.DepthToSpace +import org.tensorflow.op.nn.DepthwiseConv2dNative +import org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter +import org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput +import org.tensorflow.op.nn.Dilation2d +import org.tensorflow.op.nn.Dilation2dBackpropFilter +import org.tensorflow.op.nn.Dilation2dBackpropInput +import org.tensorflow.op.nn.Elu +import org.tensorflow.op.nn.FixedUnigramCandidateSampler +import org.tensorflow.op.nn.FractionalAvgPool +import org.tensorflow.op.nn.FractionalMaxPool +import org.tensorflow.op.nn.FusedBatchNorm +import org.tensorflow.op.nn.FusedBatchNormGrad +import org.tensorflow.op.nn.FusedPadConv2d +import 
org.tensorflow.op.nn.FusedResizeAndPadConv2d +import org.tensorflow.op.nn.InTopK +import org.tensorflow.op.nn.L2Loss +import org.tensorflow.op.nn.LeakyRelu +import org.tensorflow.op.nn.LearnedUnigramCandidateSampler +import org.tensorflow.op.nn.LocalResponseNormalization +import org.tensorflow.op.nn.LogSoftmax +import org.tensorflow.op.nn.MaxPool +import org.tensorflow.op.nn.MaxPool3d +import org.tensorflow.op.nn.MaxPool3dGrad +import org.tensorflow.op.nn.MaxPool3dGradGrad +import org.tensorflow.op.nn.MaxPoolGrad +import org.tensorflow.op.nn.MaxPoolGradGrad +import org.tensorflow.op.nn.MaxPoolGradGradWithArgmax +import org.tensorflow.op.nn.MaxPoolWithArgmax +import org.tensorflow.op.nn.NthElement +import org.tensorflow.op.nn.QuantizedAvgPool +import org.tensorflow.op.nn.QuantizedBatchNormWithGlobalNormalization +import org.tensorflow.op.nn.QuantizedBiasAdd +import org.tensorflow.op.nn.QuantizedConv2d +import org.tensorflow.op.nn.QuantizedInstanceNorm +import org.tensorflow.op.nn.QuantizedMaxPool +import org.tensorflow.op.nn.QuantizedRelu +import org.tensorflow.op.nn.QuantizedRelu6 +import org.tensorflow.op.nn.QuantizedReluX +import org.tensorflow.op.nn.Relu +import org.tensorflow.op.nn.Relu6 +import org.tensorflow.op.nn.Selu +import org.tensorflow.op.nn.Softmax +import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits +import org.tensorflow.op.nn.Softsign +import org.tensorflow.op.nn.SpaceToBatch +import org.tensorflow.op.nn.SpaceToDepth +import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits +import org.tensorflow.op.nn.TopK +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class NnOps( + /** + * Get the parent [KotlinOps] object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.NnOps = ops.java.nn + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Performs average pooling on the input. + * Each entry in `output` is the mean of the corresponding size `ksize` + * window in `value`. + * + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param ksize The size of the sliding window for each dimension of `value`. + * @param strides The stride of the sliding window for each dimension of `value`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `AvgPool` output and operands + * @return a new instance of AvgPool + * @see org.tensorflow.op.NnOps.avgPool + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + */ + public fun avgPool( + value: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool = java.avgPool( + value, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Performs 3D average pooling on the input. + * Each entry in `output` is the mean of the corresponding size `ksize` window in + * `value`. + * + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. 
+ * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `AvgPool3D` output and operands + * @return a new instance of AvgPool3d + * @see org.tensorflow.op.NnOps.avgPool3d + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + */ + public fun avgPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3d = java.avgPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes gradients of average pooling function. + * + * @param data type for `output` output + * @param origInputShape The original input dimensions. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `AvgPool3DGrad` output and operands + * @return a new instance of AvgPool3dGrad + * @see org.tensorflow.op.NnOps.avgPool3dGrad + * @param dataFormat Sets the dataFormat option. 
+ * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + */ + public fun avgPool3dGrad( + origInputShape: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3dGrad = java.avgPool3dGrad( + origInputShape, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Batch normalization. + * This op is deprecated. Prefer `tf.nn.batch_normalization`. + * + * @param data type for `result` output + * @param t A 4D input Tensor. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. 
+ * @param data type for `BatchNormWithGlobalNormalization` output and operands + * @return a new instance of BatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalization + */ + public fun batchNormWithGlobalNormalization( + t: Operand, + m: Operand, + v: Operand, + beta: Operand, + gamma: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + t, + m, + v, + beta, + gamma, + varianceEpsilon, + scaleAfterNormalization + ) + + /** + * Gradients for batch normalization. + * This op is deprecated. See `tf.nn.batch_normalization`. + * + * @param data type for `dx` output + * @param t A 4D input Tensor. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this Tensor will be multiplied + * with the normalized Tensor. + * @param backprop 4D backprop Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. 
+ * @param data type for `BatchNormWithGlobalNormalizationGrad` output and operands + * @return a new instance of BatchNormWithGlobalNormalizationGrad + * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalizationGrad + */ + public fun batchNormWithGlobalNormalizationGrad( + t: Operand, + m: Operand, + v: Operand, + gamma: Operand, + backprop: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + t, + m, + v, + gamma, + backprop, + varianceEpsilon, + scaleAfterNormalization + ) + + /** + * Adds `bias` to `value`. + * This is a special case of `tf.add` where `bias` is restricted to be 1-D. + * Broadcasting is supported, so `value` may have any number of dimensions. + * + * @param data type for `output` output + * @param value Any number of dimensions. + * @param bias 1-D with size the last dimension of `value`. + * @param options carries optional attribute values + * @param data type for `BiasAdd` output and operands + * @return a new instance of BiasAdd + * @see org.tensorflow.op.NnOps.biasAdd + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the bias tensor will be added to the last dimension + * of the value tensor. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + * @return this Options instance. + */ + public fun biasAdd( + value: Operand, + bias: Operand, + dataFormat: String? = null + ): BiasAdd = java.biasAdd( + value, + bias, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + ).toTypedArray() + ) + + /** + * The backward operation for "BiasAdd" on the "bias" tensor. + * It accumulates all the values from out_backprop into the feature dimension. 
+ * For NHWC data format, the feature dimension is the last. For NCHW data format, + * the feature dimension is the third-to-last. + * + * @param data type for `output` output + * @param outBackprop Any number of dimensions. + * @param options carries optional attribute values + * @param data type for `BiasAddGrad` output and operands + * @return a new instance of BiasAddGrad + * @see org.tensorflow.op.NnOps.biasAddGrad + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the bias tensor will be added to the last dimension + * of the value tensor. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + * @return this Options instance. + */ + public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): + BiasAddGrad = java.biasAddGrad( + outBackprop, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes the ids of the positions in sampled_candidates that match true_labels. + * When doing log-odds NCE, the result of this op should be passed through a + * SparseToDense op, then added to the logits of the sampled candidates. This has + * the effect of 'removing' the sampled labels that match the true labels by + * making the classifier sure that they are sampled labels. + * + * @param trueClasses The true_classes output of UnpackSparseLabels. + * @param sampledCandidates The sampled_candidates output of CandidateSampler. + * @param numTrue Number of true labels per context. + * @param options carries optional attribute values + * @return a new instance of ComputeAccidentalHits + * @see org.tensorflow.op.NnOps.computeAccidentalHits + * @param seed Sets the seed option. 
+ * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun computeAccidentalHits( + trueClasses: Operand, + sampledCandidates: Operand, + numTrue: Long, + seed: Long? = null, + seed2: Long? = null + ): ComputeAccidentalHits = java.computeAccidentalHits( + trueClasses, + sampledCandidates, + numTrue, + *listOfNotNull( + seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + ).toTypedArray() + ) + + /** + * Computes a 2-D convolution given 4-D `input` and `filter` tensors. + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * and a filter / kernel tensor of shape + * `[filter_height, filter_width, in_channels, out_channels]`, this op + * performs the following: + *
+ *
+ *  1. Flattens the filter to a 2-D matrix with shape
+ *     `[filter_height * filter_width * in_channels, output_channels]`.
+ *  2. Extracts image patches from the input tensor to form a _virtual_
+ *     tensor of shape `[batch, out_height, out_width, filter_height * filter_width *
+ *     in_channels]`.
+ *  3. For each patch, right-multiplies the filter matrix and the image patch
+ *     vector.
+ * + * In detail, with the default NHWC format, + * ``` + * output[b, i, j, k] = + * sum_{di, dj, q + * ``` input[b, strides[1] * i + di, strides[2] * j + dj, q] * + * filter[di, dj, q, k] + * } + * + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * + * @param data type for `output` output + * @param input A 4-D tensor. The dimension order is interpreted according to the value + * of `data_format`, see below for details. + * @param filter A 4-D tensor of shape + * `[filter_height, filter_width, in_channels, out_channels]` + * @param strides 1-D tensor of length 4. The stride of the sliding window for each + * dimension of `input`. The dimension order is determined by the value of + * `data_format`, see below for details. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `Conv2D` output and operands + * @return a new instance of Conv2d + * @see org.tensorflow.op.NnOps.conv2d + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. + * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. + * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. 
+ * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + public fun conv2d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2d = java.conv2d( + input, + filter, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of convolution with respect to the filter. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, out_channels]` tensor. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. Must be in the same order as the dimension specified with + * format. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attribute values + * @param data type for `Conv2DBackpropFilter` output and operands + * @return a new instance of Conv2dBackpropFilter + * @see org.tensorflow.op.NnOps.conv2dBackpropFilter + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. + * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. + * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + * @return this Options instance. + */ + public fun conv2dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of convolution with respect to the input. + * + * @param data type for `output` output + * @param inputSizes An integer vector representing the shape of `input`, + * where `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. Must be in the same order as the dimension specified with + * format. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `Conv2DBackpropInput` output and operands + * @return a new instance of Conv2dBackpropInput + * @see org.tensorflow.op.NnOps.conv2dBackpropInput + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. + * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. 
+ * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + * @return this Options instance. + */ + public fun conv2dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2dBackpropInput = java.conv2dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes a 3-D convolution given 5-D `input` and `filter` tensors. + * In signal processing, cross-correlation is a measure of similarity of + * two waveforms as a function of a time-lag applied to one of them. This + * is also known as a sliding dot product or sliding inner-product. 
+ * + * Our Conv3D implements a form of cross-correlation. + * + * @param data type for `output` output + * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. + * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, + * out_channels]`. `in_channels` must match between `input` and `filter`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `Conv3D` output and operands + * @return a new instance of Conv3d + * @see org.tensorflow.op.NnOps.conv3d + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + public fun conv3d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv3d = java.conv3d( + input, + filter, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of 3-D convolution with respect to the filter. + * + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 5-D + * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` + * tensor. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `Conv3DBackpropFilterV2` output and operands + * @return a new instance of Conv3dBackpropFilter + * @see org.tensorflow.op.NnOps.conv3dBackpropFilter + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. 
Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + public fun conv3dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of 3-D convolution with respect to the input. + * + * @param data type for `output` output + * @param inputSizes An integer vector representing the tensor shape of `input`, + * where `input` is a 5-D + * `[batch, depth, rows, cols, in_channels]` tensor. + * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. + * `in_channels` must match between `input` and `filter`. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `Conv3DBackpropInputV2` output and operands + * @return a new instance of Conv3dBackpropInput + * @see org.tensorflow.op.NnOps.conv3dBackpropInput + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. 
+ * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + public fun conv3dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3dBackpropInput = java.conv3dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + /** + * Performs beam search decoding on the logits given in input. + * A note about the attribute merge_repeated: For the beam search decoder, + * this means that if consecutive entries in a beam are the same, only + * the first of these is emitted. That is, when the top path is "A B B B B", + * "A B" is returned if merge_repeated = True but "A B B B B" is + * returned if merge_repeated = False. + * + * @param data type for `log_probability` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch)`. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param options carries optional attribute values + * @param data type for `CTCBeamSearchDecoder` output and operands + * @return a new instance of CtcBeamSearchDecoder + * @see org.tensorflow.op.NnOps.ctcBeamSearchDecoder + * @param mergeRepeated Sets the mergeRepeated option. 
+ * + * @param mergeRepeated If true, merge repeated classes in output. + * @return this Options instance. + */ + public fun ctcBeamSearchDecoder( + inputs: Operand, + sequenceLength: Operand, + beamWidth: Long, + topPaths: Long, + mergeRepeated: Boolean? = null + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + inputs, + sequenceLength, + beamWidth, + topPaths, + *listOfNotNull( + mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + ).toTypedArray() + ) + + /** + * Performs greedy decoding on the logits given in inputs. + * A note about the attribute merge_repeated: if enabled, when + * consecutive logits' maximum indices are the same, only the first of + * these is emitted. Labeling the blank '*', the sequence "A B B * B B" + * becomes "A B B" if merge_repeated = True and "A B B B B" if + * merge_repeated = False. + * + * Regardless of the value of merge_repeated, if the maximum index of a given + * time and batch corresponds to the blank, index `(num_classes - 1)`, no new + * element is emitted. + * + * @param data type for `log_probability` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. + * @param options carries optional attribute values + * @param data type for `CTCGreedyDecoder` output and operands + * @return a new instance of CtcGreedyDecoder + * @see org.tensorflow.op.NnOps.ctcGreedyDecoder + * @param mergeRepeated Sets the mergeRepeated option. + * + * @param mergeRepeated If True, merge repeated classes in output. + * @return this Options instance. + * @param blankIndex Sets the blankIndex option. + * + * @param blankIndex the blankIndex option + * @return this Options instance. + */ + public fun ctcGreedyDecoder( + inputs: Operand, + sequenceLength: Operand, + mergeRepeated: Boolean? = null, + blankIndex: Long? 
= null + ): CtcGreedyDecoder = java.ctcGreedyDecoder( + inputs, + sequenceLength, + *listOfNotNull( + mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) }, + blankIndex?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.blankIndex(it) } + ).toTypedArray() + ) + + /** + * Calculates the CTC Loss (log probability) for each batch entry. Also calculates + * the gradient. This class performs the softmax operation for you, so inputs + * should be e.g. linear projections of outputs by an LSTM. + * + * @param data type for `loss` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param labelsIndices The indices of a `SparseTensor`. + * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for + * `(batch b, time t)`. + * @param labelsValues The values (labels) associated with the given batch and time. + * @param sequenceLength A vector containing sequence lengths (batch). + * @param options carries optional attribute values + * @param data type for `CTCLoss` output and operands + * @return a new instance of CtcLoss + * @see org.tensorflow.op.NnOps.ctcLoss + * @param preprocessCollapseRepeated Sets the preprocessCollapseRepeated option. + * + * @param preprocessCollapseRepeated Scalar, if true then repeated labels are + * collapsed prior to the CTC calculation. + * @return this Options instance. + * @param ctcMergeRepeated Sets the ctcMergeRepeated option. + * + * @param ctcMergeRepeated Scalar. If set to false, _during_ CTC calculation + * repeated non-blank labels will not be merged and are interpreted as + * individual labels. This is a simplified version of CTC. + * @return this Options instance. + * @param ignoreLongerOutputsThanInputs Sets the ignoreLongerOutputsThanInputs option. + * + * @param ignoreLongerOutputsThanInputs Scalar. 
If set to true, during CTC + * calculation, items that have longer output sequences than input sequences + * are skipped: they don't contribute to the loss term and have zero-gradient. + * @return this Options instance. + */ + public fun ctcLoss( + inputs: Operand, + labelsIndices: Operand, + labelsValues: Operand, + sequenceLength: Operand, + preprocessCollapseRepeated: Boolean? = null, + ctcMergeRepeated: Boolean? = null, + ignoreLongerOutputsThanInputs: Boolean? = null + ): CtcLoss = java.ctcLoss( + inputs, + labelsIndices, + labelsValues, + sequenceLength, + *listOfNotNull( + preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) + }, + ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let{ + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } + ).toTypedArray() + ) + + /** + * Converts CudnnRNN params from canonical form to usable form. It supports the projection in + * LSTM. + * Writes a set of weights into the opaque params buffer so they can be used in + * upcoming training or inferences. + * + * Note that the params buffer may not be compatible across different GPUs. So any + * save and restoration should be converted to and from the canonical weights and + * biases. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * weights: the canonical form of weights that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * biases: the canonical form of biases that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * num_params_weights: number of weight parameter matrix for all layers. + * num_params_biases: number of bias parameter vector for all layers. 
+ * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * num_proj: The output dimensionality for the projection matrices. If None or 0, + * no projection is performed. + * + * @param data type for `params` output + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param weights The weights value + * @param biases The biases value + * @param options carries optional attribute values + * @param data type for `CudnnRNNCanonicalToParamsV2` output and operands + * @return a new instance of CudnnRNNCanonicalToParams + * @see org.tensorflow.op.NnOps.cudnnRNNCanonicalToParams + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. 
+ * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. + */ + public fun cudnnRNNCanonicalToParams( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + weights: Iterable>, + biases: Iterable>, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + numLayers, + numUnits, + inputSize, + weights, + biases, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + ).toTypedArray() + ) + + /** + * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. + * Retrieves a set of weights from the opaque params buffer that can be saved and + * restored in a way compatible with future runs. + * + * Note that the params buffer may not be compatible across different GPUs. So any + * save and restoration should be converted to and from the canonical weights and + * biases. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * num_params_weights: number of weight parameter matrix for all layers. + * num_params_biases: number of bias parameter vector for all layers. + * weights: the canonical form of weights that can be used for saving + * and restoration. 
They are more likely to be compatible across different + * generations. + * biases: the canonical form of biases that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * num_proj: The output dimensionality for the projection matrices. If None or 0, + * no projection is performed. + * + * @param data type for `weights` output + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param params The params value + * @param numParamsWeights The value of the numParamsWeights attribute + * @param numParamsBiases The value of the numParamsBiases attribute + * @param options carries optional attribute values + * @param data type for `CudnnRNNParamsToCanonicalV2` output and operands + * @return a new instance of CudnnRNNParamsToCanonical + * @see org.tensorflow.op.NnOps.cudnnRNNParamsToCanonical + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. 
+ * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. + */ + public fun cudnnRNNParamsToCanonical( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + params: Operand, + numParamsWeights: Long, + numParamsBiases: Long, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + numLayers, + numUnits, + inputSize, + params, + numParamsWeights, + numParamsBiases, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + ).toTypedArray() + ) + + /** + * Computes size of weights that can be used by a Cudnn RNN model. + * Return the params size that can be used by the Cudnn RNN model. Subsequent + * weight allocation and initialization should use this size. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. 
+ * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * params_size: The size of the params buffer that should be allocated and + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param data type for `params_size` output + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param T The value of the T attribute + * @param S The value of the S attribute + * @param options carries optional attribute values + * @param data type for `CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands + * @return a new instance of CudnnRnnParamsSize + * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. 
+ * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. + */ + public fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + T_: Class, + S: Class, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + numLayers, + numUnits, + inputSize, + T_, + S, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + ).toTypedArray() + ) + + /** + * Returns the dimension index in the destination data format given the one in + * the source data format. + * + * @param data type for `y` output + * @param x A Tensor with each element as a dimension index in source data format. + * Must be in the range [-4, 4). + * @param options carries optional attribute values + * @param data type for `DataFormatDimMap` output and operands + * @return a new instance of DataFormatDimMap + * @see org.tensorflow.op.NnOps.dataFormatDimMap + * @param srcFormat Sets the srcFormat option. + * + * @param srcFormat source data format. + * @return this Options instance. 
+ * @param dstFormat Sets the dstFormat option. + * + * @param dstFormat destination data format. + * @return this Options instance. + */ + public fun dataFormatDimMap( + x: Operand, + srcFormat: String? = null, + dstFormat: String? = null + ): DataFormatDimMap = java.dataFormatDimMap( + x, + *listOfNotNull( + srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + ).toTypedArray() + ) + + /** + * Permute input tensor from `src_format` to `dst_format`. + * Input tensor must be a vector of size 4, or a 4x2 tensor. + * + * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: + * ``` + * [1, 2, 3, 4] + * + * ``` + * + * and + * ``` + * [[1, 2, 3, 4], + * [5, 6, 7, 8]] + * + * ``` + * + * , the outputs will be (respectively): + * ``` + * [1, 4, 2, 3] + * + * ``` + * + * and + * ``` + * [[1, 4, 2, 3], + * [5, 8, 6, 7]] + * + * ``` + * + * @param data type for `y` output + * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. + * @param options carries optional attribute values + * @param data type for `DataFormatVecPermute` output and operands + * @return a new instance of DataFormatVecPermute + * @see org.tensorflow.op.NnOps.dataFormatVecPermute + * @param srcFormat Sets the srcFormat option. + * + * @param srcFormat source data format. + * @return this Options instance. + * @param dstFormat Sets the dstFormat option. + * + * @param dstFormat destination data format. + * @return this Options instance. + */ + public fun dataFormatVecPermute( + x: Operand, + srcFormat: String? = null, + dstFormat: String? = null + ): DataFormatVecPermute = java.dataFormatVecPermute( + x, + *listOfNotNull( + srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + ).toTypedArray() + ) + + /** + * DepthToSpace for tensors of type T. 
+ * Rearranges data from depth into blocks of spatial data. + * This is the reverse transformation of SpaceToDepth. More specifically, + * this op outputs a copy of the input tensor where values from the `depth` + * dimension are moved in spatial blocks to the `height` and `width` dimensions. + * The attr `block_size` indicates the input block size and how the data is moved. + *
+ * <ul>
+ * <li>Chunks of data of size `block_size * block_size` from depth are rearranged
+ * into non-overlapping blocks of size `block_size x block_size`</li>
+ * <li>The width the output tensor is `input_depth * block_size`, whereas the
+ * height is `input_height * block_size`.</li>
+ * <li>The Y, X coordinates within each block of the output image are determined
+ * by the high order component of the input channel index.</li>
+ * <li>The depth of the input tensor must be divisible by
+ * `block_size * block_size`.</li>
+ * </ul>
+ * + * The `data_format` attr specifies the layout of the input and output tensors + * with the following options: + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. + * e.g. for data_format = NHWC, + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + * within the input image, bX, bY means coordinates + * within the output block, oC means output channels). + * The output would be the input transposed to the following layout: + * n,iY,bY,iX,bX,oC + * + * This operation is useful for resizing the activations between convolutions + * (but keeping all data), e.g. instead of pooling. It is also useful for training + * purely convolutional models. + * + * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" + * and + * block_size = 2: + * ``` + * x = [[[[1, 2, 3, 4]]]] + * + * + * ``` + * + * This operation will output a tensor of shape `[1, 2, 2, 1]`: + * ``` + * [[[[1], [2]], + * [[3], [4]]]] + * + * ``` + * + * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + * the corresponding output will have 2x2 elements and will have a depth of + * 1 channel (1 = `4 / (block_size * block_size)`). + * The output element shape is `[2, 2, 1]`. + * + * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
+ * ``` + * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * + * ``` + * + * This operation, for block size of 2, will return the following tensor of shape + * `[1, 2, 2, 3]` + * ``` + * [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * + * ``` + * + * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] + * + * ``` + * + * the operator will return the following tensor of shape `[1 4 4 1]`: + * ``` + * x = [[[ [1], [2], [5], [6]], + * [ [3], [4], [7], [8]], + * [ [9], [10], [13], [14]], + * [ [11], [12], [15], [16]]]] + * + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param blockSize The size of the spatial block, same as in Space2Depth. + * @param options carries optional attribute values + * @param data type for `DepthToSpace` output and operands + * @return a new instance of DepthToSpace + * @see org.tensorflow.op.NnOps.depthToSpace + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. + */ + public fun depthToSpace( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): DepthToSpace = java.depthToSpace( + input, + blockSize, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * and a filter / kernel tensor of shape + * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + * a different filter to each input channel (expanding from 1 channel to + * `channel_multiplier` channels for each), then concatenates the results + * together. 
Thus, the output has `in_channels * channel_multiplier` channels. + * ``` + * for k in 0..in_channels-1 + * for q in 0..channel_multiplier-1 + * output[b, i, j, k * channel_multiplier + q] = + * sum_{di, dj + * ``` input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[di, dj, k, q] + * } + * + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * + * @param data type for `output` output + * @param input The input value + * @param filter The filter value + * @param strides 1-D of length 4. The stride of the sliding window for each dimension + * of `input`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `DepthwiseConv2dNative` output and operands + * @return a new instance of DepthwiseConv2dNative + * @see org.tensorflow.op.NnOps.depthwiseConv2dNative + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + * @return this Options instance. 
+ */ + public fun depthwiseConv2dNative( + input: Operand, + filter: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + input, + filter, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of depthwise convolution with respect to the filter. + * + * @param data type for `output` output + * @param input 4-D with shape based on `data_format`. For example, if + * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, + * in_channels]` tensor. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `DepthwiseConv2dNativeBackpropFilter` output and operands + * @return a new instance of DepthwiseConv2dNativeBackpropFilter + * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropFilter + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. 
+ * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + * @return this Options instance. + */ + public fun depthwiseConv2dNativeBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the gradients of depthwise convolution with respect to the input. + * + * @param data type for `output` output + * @param inputSizes An integer vector representing the shape of `input`, based + * on `data_format`. For example, if `data_format` is 'NHWC' then + * `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. 
+ * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `DepthwiseConv2dNativeBackpropInput` output and operands + * @return a new instance of DepthwiseConv2dNativeBackpropInput + * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropInput + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + * @return this Options instance. + */ + public fun depthwiseConv2dNativeBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + ).toTypedArray() + ) + + /** + * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + * input channel is processed independently of the others with its own structuring + * function. The `output` tensor has shape + * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + * tensor depend on the `padding` algorithm. We currently only support the default + * "NHWC" `data_format`. + * + * In detail, the grayscale morphological 2-D dilation is the max-sum correlation + * (for consistency with `conv2d`, we use unmirrored filters): + * ``` + * output[b, y, x, c] = + * max_{dy, dx + * ``` input[b, + * strides[1] * y + rates[1] * dy, + * strides[2] * x + rates[2] * dx, + * c] + + * filter[dy, dx, c] + * } + * + * Max-pooling is a special case when the filter has size equal to the pooling + * kernel size and contains all zeros. + * + * Note on duality: The dilation of `input` by the `filter` is equal to the + * negation of the erosion of `-input` by the reflected `filter`. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. Must be: `[1, stride_height, stride_width, 1]`. 
+ * @param rates The input stride for atrous morphological dilation. Must be: + * `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @param data type for `Dilation2D` output and operands + * @return a new instance of Dilation2d + * @see org.tensorflow.op.NnOps.dilation2d + */ + public fun dilation2d( + input: Operand, + filter: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2d = java.dilation2d( + input, + filter, + strides, + rates, + padding + ) + + /** + * Computes the gradient of morphological 2-D dilation with respect to the filter. + * + * @param data type for `filter_backprop` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param strides 1-D of length 4. The stride of the sliding window for each dimension of + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * @param rates 1-D of length 4. The input stride for atrous morphological dilation. + * Must be: `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @param data type for `Dilation2DBackpropFilter` output and operands + * @return a new instance of Dilation2dBackpropFilter + * @see org.tensorflow.op.NnOps.dilation2dBackpropFilter + */ + public fun dilation2dBackpropFilter( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + /** + * Computes the gradient of morphological 2-D dilation with respect to the input. + * + * @param data type for `in_backprop` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. 
+ * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param strides 1-D of length 4. The stride of the sliding window for each dimension of + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * @param rates 1-D of length 4. The input stride for atrous morphological dilation. + * Must be: `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @param data type for `Dilation2DBackpropInput` output and operands + * @return a new instance of Dilation2dBackpropInput + * @see org.tensorflow.op.NnOps.dilation2dBackpropInput + */ + public fun dilation2dBackpropInput( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + /** + * Computes the exponential linear function. + * The ELU function is defined as: + *
+ * <ul>
+ * <li>$ e ^ x - 1 $ if $ x < 0 $</li>
+ * <li>$ x $ if $ x >= 0 $</li>
+ * </ul>
+ * + * Examples: + * ``` + * + * tf.nn.elu(1.0) + * + * tf.nn.elu(0.0) + * + * tf.nn.elu(-1000.0) + * + * ``` + * + * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * ](http://arxiv.org/abs/1511.07289) + * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Elu` output and operands + * @return a new instance of Elu + * @see org.tensorflow.op.NnOps.elu + */ + public fun elu(features: Operand): Elu = java.elu( + features + ) + + /** + * Generates labels for candidate sampling with a learned unigram distribution. + * A unigram sampler could use a fixed unigram distribution read from a + * file or passed in as an in-memory array instead of building up the distribution + * from data on the fly. There is also an option to skew the distribution by + * applying a distortion power to the weights. + * + * The vocabulary file should be in CSV-like format, with the last field + * being the weight associated with the word. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). 
+ * @param options carries optional attribute values + * @return a new instance of FixedUnigramCandidateSampler + * @see org.tensorflow.op.NnOps.fixedUnigramCandidateSampler + * @param vocabFile Sets the vocabFile option. + * + * @param vocabFile Each valid line in this file (which should have a CSV-like format) + * corresponds to a valid word ID. IDs are in sequential order, starting from + * num_reserved_ids. The last entry in each line is expected to be a value + * corresponding to the count or relative probability. Exactly one of vocab_file + * and unigrams needs to be passed to this op. + * @return this Options instance. + * @param distortion Sets the distortion option. + * + * @param distortion The distortion is used to skew the unigram probability distribution. + * Each weight is first raised to the distortion's power before adding to the + * internal unigram distribution. As a result, distortion = 1.0 gives regular + * unigram sampling (as defined by the vocab file), and distortion = 0.0 gives + * a uniform distribution. + * @return this Options instance. + * @param numReservedIds Sets the numReservedIds option. + * + * @param numReservedIds Optionally some reserved IDs can be added in the range [0, + * ..., num_reserved_ids) by the users. One use case is that a special unknown + * word token is used as ID 0. These IDs will have a sampling probability of 0. + * @return this Options instance. + * @param numShards Sets the numShards option. + * + * @param numShards A sampler can be used to sample from a subset of the original range + * in order to speed up the whole computation through parallelism. This parameter + * (together with 'shard') indicates the number of partitions that are being + * used in the overall computation. + * @return this Options instance. + * @param shard Sets the shard option. + * + * @param shard A sampler can be used to sample from a subset of the original range + * in order to speed up the whole computation through parallelism. 
This parameter + * (together with 'num_shards') indicates the particular partition number of a + * sampler op, when partitioning is being used. + * @return this Options instance. + * @param unigrams Sets the unigrams option. + * + * @param unigrams A list of unigram counts or probabilities, one per ID in sequential + * order. Exactly one of vocab_file and unigrams should be passed to this op. + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun fixedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vocabFile: String? = null, + distortion: Float? = null, + numReservedIds: Long? = null, + numShards: Long? = null, + shard: Long? = null, + unigrams: List? = null, + seed: Long? = null, + seed2: Long? 
= null + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + /** + * Performs fractional average pooling on the input. + * Fractional average pooling is similar to Fractional max pooling in the pooling + * region generation step. The only difference is that after pooling regions are + * generated, a mean operation is performed instead of a max operation in each + * pooling region. + * + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * must be 1.0 because we don't allow pooling on batch and channels + * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + * respectively. + * @param options carries optional attribute values + * @param data type for `FractionalAvgPool` output and operands + * @return a new instance of FractionalAvgPool + * @see org.tensorflow.op.NnOps.fractionalAvgPool + * @param pseudoRandom Sets the pseudoRandom option. 
+ * + * @param pseudoRandom When set to True, generates the pooling sequence in a + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * difference between pseudorandom and random. + * @return this Options instance. + * @param overlapping Sets the overlapping option. + * + * @param overlapping When set to True, it means when pooling, the values at the boundary + * of adjacent pooling cells are used by both cells. For example: + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [41/3, 26/3] for fractional avg pooling. + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic When set to True, a fixed pooling region will be used when + * iterating over a FractionalAvgPool node in the computation graph. Mainly used + * in unit test to make FractionalAvgPool deterministic. + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun fractionalAvgPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? 
= null + ): FractionalAvgPool = java.fractionalAvgPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + ).toTypedArray() + ) + + /** + * Performs fractional max pooling on the input. + * Fractional max pooling is slightly different than regular max pooling. In + * regular max pooling, you downsize an input set by taking the maximum value of + * smaller N x N subsections of the set (often 2x2), and try to reduce the set by + * a factor of N, where N is an integer. Fractional max pooling, as you might + * expect from the word "fractional", means that the overall reduction ratio N + * does not have to be an integer. + * + * The sizes of the pooling regions are generated randomly but are fairly uniform. + * For example, let's look at the height dimension, and the constraints on the + * list of rows that will be pool boundaries. + * + * First we define the following: + *
+ * <ol>
+ * <li>input_row_length : the number of rows from the input set</li>
+ * <li>output_row_length : which will be smaller than the input</li>
+ * <li>alpha = input_row_length / output_row_length : our reduction ratio</li>
+ * <li>K = floor(alpha)</li>
+ * <li>row_pooling_sequence : this is the result list of pool boundary rows</li>
+ * </ol>
+ * + * Then, row_pooling_sequence should satisfy: + *
+ * <ol>
+ * <li>a[0] = 0 : the first value of the sequence is 0</li>
+ * <li>a[end] = input_row_length : the last value of the sequence is the size</li>
+ * <li>K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size</li>
+ * <li>length(row_pooling_sequence) = output_row_length+1</li>
+ * </ol>
+ * + * For more details on fractional max pooling, see this paper:[Benjamin Graham, Fractional + * Max-Pooling](http://arxiv.org/abs/1412.6071) + * + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * must be 1.0 because we don't allow pooling on batch and channels + * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + * respectively. + * @param options carries optional attribute values + * @param data type for `FractionalMaxPool` output and operands + * @return a new instance of FractionalMaxPool + * @see org.tensorflow.op.NnOps.fractionalMaxPool + * @param pseudoRandom Sets the pseudoRandom option. + * + * @param pseudoRandom When set to True, generates the pooling sequence in a + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * difference between pseudorandom and random. + * @return this Options instance. + * @param overlapping Sets the overlapping option. + * + * @param overlapping When set to True, it means when pooling, the values at the boundary + * of adjacent pooling cells are used by both cells. For example: + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [20, 16] for fractional max pooling. + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic When set to True, a fixed pooling region will be used when + * iterating over a FractionalMaxPool node in the computation graph. Mainly used + * in unit test to make FractionalMaxPool deterministic. 
+ * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun fractionalMaxPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? = null + ): FractionalMaxPool = java.fractionalMaxPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + ).toTypedArray() + ) + + /** + * Batch normalization. + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * The size of 1D Tensors matches the dimension C of the 4D Tensors. + * + * @param data type for `y` output + * @param data type for `batch_mean` output + * @param x A 4D Tensor for input data. + * @param scale A 1D Tensor for scaling factor, to scale the normalized x. + * @param offset A 1D Tensor for offset, to shift to the normalized x. + * @param mean A 1D Tensor for population mean. Used for inference only; + * must be empty for training. + * @param variance A 1D Tensor for population variance. Used for inference only; + * must be empty for training. 
+ * @param options carries optional attribute values + * @param data type for `FusedBatchNormV3` output and operands + * @param data type for `FusedBatchNormV3` output and operands + * @return a new instance of FusedBatchNorm + * @see org.tensorflow.op.NnOps.fusedBatchNorm + * @param epsilon Sets the epsilon option. + * + * @param epsilon A small float number added to the variance of x. + * @return this Options instance. + * @param exponentialAvgFactor Sets the exponentialAvgFactor option. + * + * @param exponentialAvgFactor the exponentialAvgFactor option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format for x and y. Either "NHWC" (default) or + * "NCHW". + * @return this Options instance. + * @param isTraining Sets the isTraining option. + * + * @param isTraining A bool value to indicate the operation is for training (default) + * or inference. + * @return this Options instance. + */ + public fun fusedBatchNorm( + x: Operand, + scale: Operand, + offset: Operand, + mean: Operand, + variance: Operand, + epsilon: Float? = null, + exponentialAvgFactor: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? = null + ): FusedBatchNorm = java.fusedBatchNorm( + x, + scale, + offset, + mean, + variance, + *listOfNotNull( + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + ).toTypedArray() + ) + + /** + * Gradient for batch normalization. + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * The size of 1D Tensors matches the dimension C of the 4D Tensors. 
+ * + * @param data type for `x_backprop` output + * @param data type for `scale_backprop` output + * @param yBackprop A 4D Tensor for the gradient with respect to y. + * @param x A 4D Tensor for input data. + * @param scale A 1D Tensor for scaling factor, to scale the normalized x. + * @param reserveSpace1 When is_training is True, a 1D Tensor for the computed batch + * mean to be reused in gradient computation. When is_training is + * False, a 1D Tensor for the population mean to be reused in both + * 1st and 2nd order gradient computation. + * @param reserveSpace2 When is_training is True, a 1D Tensor for the computed batch + * variance (inverted variance in the cuDNN case) to be reused in + * gradient computation. When is_training is False, a 1D Tensor + * for the population variance to be reused in both 1st and 2nd + * order gradient computation. + * @param reserveSpace3 When is_training is True, a 1D Tensor for some intermediate results to + * be reused + * in gradient computation. When is_training is False, a dummy empty Tensor will be + * created. + * @param options carries optional attribute values + * @param data type for `FusedBatchNormGradV3` output and operands + * @param data type for `FusedBatchNormGradV3` output and operands + * @return a new instance of FusedBatchNormGrad + * @see org.tensorflow.op.NnOps.fusedBatchNormGrad + * @param epsilon Sets the epsilon option. + * + * @param epsilon A small float number added to the variance of x. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format for y_backprop, x, x_backprop. + * Either "NHWC" (default) or "NCHW". + * @return this Options instance. + * @param isTraining Sets the isTraining option. + * + * @param isTraining A bool value to indicate the operation is for training (default) + * or inference. + * @return this Options instance. 
+ */ + public fun fusedBatchNormGrad( + yBackprop: Operand, + x: Operand, + scale: Operand, + reserveSpace1: Operand, + reserveSpace2: Operand, + reserveSpace3: Operand, + epsilon: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? = null + ): FusedBatchNormGrad = java.fusedBatchNormGrad( + yBackprop, + x, + scale, + reserveSpace1, + reserveSpace2, + reserveSpace3, + *listOfNotNull( + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + ).toTypedArray() + ) + + /** + * Performs a padding as a preprocess during a convolution. + * Similar to FusedResizeAndPadConv2d, this op allows for an optimized + * implementation where the spatial padding transformation stage is fused with the + * im2col lookup, but in this case without the bilinear filtering required for + * resizing. Fusing the padding prevents the need to write out the intermediate + * results as whole tensors, reducing memory pressure, and we can get some latency + * gains by merging the transformation calculations. + * The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + * order is used instead. + * Internally this op uses a single per-graph scratch buffer, which means that it + * will block if multiple versions are being run in parallel. This is because this + * operator is primarily an optimization to minimize memory usage. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode The value of the mode attribute + * @param strides 1-D of length 4. 
The stride of the sliding window for each dimension + * of `input`. Must be in the same order as the dimension specified with format. + * @param padding The type of padding algorithm to use. + * @param data type for `FusedPadConv2D` output and operands + * @return a new instance of FusedPadConv2d + * @see org.tensorflow.op.NnOps.fusedPadConv2d + */ + public fun fusedPadConv2d( + input: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String + ): FusedPadConv2d = java.fusedPadConv2d( + input, + paddings, + filter, + mode, + strides, + padding + ) + + /** + * Performs a resize and padding as a preprocess during a convolution. + * It's often possible to do spatial transformations more efficiently as part of + * the packing stage of a convolution, so this op allows for an optimized + * implementation where these stages are fused together. This prevents the need to + * write out the intermediate results as whole tensors, reducing memory pressure, + * and we can get some latency gains by merging the transformation calculations. + * The data_format attribute for Conv2D isn't supported by this op, and defaults to + * 'NHWC' order. + * Internally this op uses a single per-graph scratch buffer, which means that it + * will block if multiple versions are being run in parallel. This is because this + * operator is primarily an optimization to minimize memory usage. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param sizeOutput A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode The value of the mode attribute + * @param strides 1-D of length 4. 
The stride of the sliding window for each dimension + * of `input`. Must be in the same order as the dimension specified with format. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `FusedResizeAndPadConv2D` output and operands + * @return a new instance of FusedResizeAndPadConv2d + * @see org.tensorflow.op.NnOps.fusedResizeAndPadConv2d + * @param resizeAlignCorners Sets the resizeAlignCorners option. + * + * @param resizeAlignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. + */ + public fun fusedResizeAndPadConv2d( + input: Operand, + sizeOutput: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String, + resizeAlignCorners: Boolean? = null + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + input, + sizeOutput, + paddings, + filter, + mode, + strides, + padding, + *listOfNotNull( + resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + ).toTypedArray() + ) + + /** + * Says whether the targets are in the top `K` predictions. + * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + * prediction for the target class is among the top `k` predictions among + * all predictions for example `i`. Note that the behavior of `InTopK` differs + * from the `TopK` op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-`k` boundary, all of those + * classes are considered to be in the top `k`. 
+ * + * More formally, let + * + * `\(predictions_i\)` be the predictions for all classes for example `i`, + * `\(targets_i\)` be the target class for example `i`, + * `\(out_i\)` be the output for example `i`, + * + * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A `batch_size` x `classes` tensor. + * @param targets A `batch_size` vector of class ids. + * @param k Number of top elements to look at for computing precision. + * @param data type for `InTopKV2` output and operands + * @return a new instance of InTopK + * @see org.tensorflow.op.NnOps.inTopK + */ + public fun inTopK( + predictions: Operand, + targets: Operand, + k: Operand + ): InTopK = java.inTopK( + predictions, + targets, + k + ) + + /** + * L2 Loss. + * Computes half the L2 norm of a tensor without the `sqrt`: + * ``` + * output = sum(t ** 2) / 2 + * + * ``` + * + * @param data type for `output` output + * @param t Typically 2-D, but may have any dimensions. + * @param data type for `L2Loss` output and operands + * @return a new instance of L2Loss + * @see org.tensorflow.op.NnOps.l2Loss + */ + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + t + ) + + /** + * Computes rectified linear: `max(features, features * alpha)`. + * + * @param data type for `activations` output + * @param features The features value + * @param options carries optional attribute values + * @param data type for `LeakyRelu` output and operands + * @return a new instance of LeakyRelu + * @see org.tensorflow.op.NnOps.leakyRelu + * @param alpha Sets the alpha option. + * + * @param alpha the alpha option + * @return this Options instance. + */ + public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() + ) + + /** + * Generates labels for candidate sampling with a learned unigram distribution. 
+ * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param options carries optional attribute values + * @return a new instance of LearnedUnigramCandidateSampler + * @see org.tensorflow.op.NnOps.learnedUnigramCandidateSampler + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun learnedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? 
= null + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + /** + * Local Response Normalization. + * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + * dimension), and each vector is normalized independently. Within a given vector, + * each component is divided by the weighted, squared sum of inputs within + * `depth_radius`. In detail, + * ``` + * sqr_sum[a, b, c, d] = + * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + * output = input / (bias + alpha * sqr_sum) ** beta + * + * ``` + * + * For details, see [Krizhevsky et al., ImageNet classification with deep + * convolutional neural networks (NIPS + * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) + * . + * + * @param data type for `output` output + * @param input 4-D. + * @param options carries optional attribute values + * @param data type for `LRN` output and operands + * @return a new instance of LocalResponseNormalization + * @see org.tensorflow.op.NnOps.localResponseNormalization + * @param depthRadius Sets the depthRadius option. + * + * @param depthRadius 0-D. Half-width of the 1-D normalization window. + * @return this Options instance. + * @param bias Sets the bias option. + * + * @param bias An offset (usually positive to avoid dividing by 0). + * @return this Options instance. + * @param alpha Sets the alpha option. + * + * @param alpha A scale factor, usually positive. + * @return this Options instance. + * @param beta Sets the beta option. + * + * @param beta An exponent. + * @return this Options instance. + */ + public fun localResponseNormalization( + input: Operand, + depthRadius: Long? 
= null, + bias: Float? = null, + alpha: Float? = null, + beta: Float? = null + ): LocalResponseNormalization = java.localResponseNormalization( + input, + *listOfNotNull( + depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + ).toTypedArray() + ) + + /** + * Computes log softmax activations. + * For each batch `i` and class `j` we have + * ``` + * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + * + * ``` + * + * @param data type for `logsoftmax` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param data type for `LogSoftmax` output and operands + * @return a new instance of LogSoftmax + * @see org.tensorflow.op.NnOps.logSoftmax + */ + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + logits + ) + + /** + * Performs max pooling on the input. + * + * @param data type for `output` output + * @param input 4-D input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolV2` output and operands + * @return a new instance of MaxPool + * @see org.tensorflow.op.NnOps.maxPool + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. 
+ */ + public fun maxPool( + input: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPool = java.maxPool( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Performs 3D max pooling on the input. + * + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPool3D` output and operands + * @return a new instance of MaxPool3d + * @see org.tensorflow.op.NnOps.maxPool3d + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + */ + public fun maxPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3d = java.maxPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes gradients of 3D max pooling function. + * + * @param data type for `output` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. 
+ * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPool3DGrad` output and operands + * @param data type for `MaxPool3DGrad` output and operands + * @return a new instance of MaxPool3dGrad + * @see org.tensorflow.op.NnOps.maxPool3dGrad + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + */ + public fun maxPool3dGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3dGrad = java.maxPool3dGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes second-order gradients of the maxpooling function. + * + * @param data type for `output` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. 
The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPool3DGradGrad` output and operands + * @return a new instance of MaxPool3dGradGrad + * @see org.tensorflow.op.NnOps.maxPool3dGradGrad + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + */ + public fun maxPool3dGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes gradients of the maxpooling function. + * + * @param data type for `output` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolGradV2` output and operands + * @return a new instance of MaxPoolGrad + * @see org.tensorflow.op.NnOps.maxPoolGrad + * @param dataFormat Sets the dataFormat option. 
+ * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + */ + public fun maxPoolGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGrad = java.maxPoolGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes second-order gradients of the maxpooling function. + * + * @param data type for `output` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolGradGradV2` output and operands + * @return a new instance of MaxPoolGradGrad + * @see org.tensorflow.op.NnOps.maxPoolGradGrad + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. 
+ */ + public fun maxPoolGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGradGrad = java.maxPoolGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes second-order gradients of the maxpooling function. + * + * @param data type for `output` output + * @param input The original input. + * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + * input of `max_pool`. + * @param argmax The indices of the maximum values chosen for each output of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolGradGradWithArgmax` output and operands + * @return a new instance of MaxPoolGradGradWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolGradGradWithArgmax + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @return this Options instance. + */ + public fun maxPoolGradGradWithArgmax( + input: Operand, + grad: Operand, + argmax: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + input, + grad, + argmax, + ksize, + strides, + padding, + *listOfNotNull( + includeBatchInIndex?.let{ + org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + /** + * Performs max pooling on the input and outputs both max values and indices. 
+ * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. + * + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolWithArgmax` output and operands + * @return a new instance of MaxPoolWithArgmax, with default output types + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + */ + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + padding: String, + options: Array + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + padding, + options + ) + + /** + * Performs max pooling on the input and outputs both max values and indices. + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. 
+ * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. + * + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param Targmax The value of the Targmax attribute + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands + * @return a new instance of MaxPoolWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @return this Options instance. + */ + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + Targmax: Class, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + Targmax, + padding, + *listOfNotNull( + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + /** + * Finds values of the `n`-th order statistic for the last dimension. + * If the input is a vector (rank-1), finds the entries which is the nth-smallest + * value in the vector and outputs their values as scalar tensor. + * + * For matrices (resp. 
higher rank input), computes the entries which is the + * nth-smallest value in each row (resp. vector along the last dimension). Thus, + * ``` + * values.shape = input.shape[:-1] + * + * ``` + * + * @param data type for `values` output + * @param input 1-D or higher with last dimension at least `n+1`. + * @param n 0-D. Position of sorted vector to select along the last dimension (along + * each row for matrices). Valid range of n is `[0, input.shape[:-1])` + * @param options carries optional attribute values + * @param data type for `NthElement` output and operands + * @return a new instance of NthElement + * @see org.tensorflow.op.NnOps.nthElement + * @param reverse Sets the reverse option. + * + * @param reverse When set to True, find the nth-largest value in the vector and vice + * versa. + * @return this Options instance. + */ + public fun nthElement( + input: Operand, + n: Operand, + reverse: Boolean? = null + ): NthElement = java.nthElement( + input, + n, + *listOfNotNull( + reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } + ).toTypedArray() + ) + + /** + * Produces the average pool of the input tensor for quantized types. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, height, width, channels]`. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param ksize The size of the window for each dimension of the input tensor. + * The length must be 4 to match the number of dimensions of the input. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. The length must be 4 to match the number of dimensions of the input. + * @param padding The type of padding algorithm to use. 
+ * @param data type for `QuantizedAvgPool` output and operands + * @return a new instance of QuantizedAvgPool + * @see org.tensorflow.op.NnOps.quantizedAvgPool + */ + public fun quantizedAvgPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedAvgPool = java.quantizedAvgPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + /** + * Quantized Batch normalization. + * This op is deprecated and will be removed in the future. Prefer + * `tf.nn.batch_normalization`. + * + * @param data type for `result` output + * @param t A 4D input Tensor. + * @param tMin The value represented by the lowest quantized input. + * @param tMax The value represented by the highest quantized input. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param mMin The value represented by the lowest quantized mean. + * @param mMax The value represented by the highest quantized mean. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param vMin The value represented by the lowest quantized variance. + * @param vMax The value represented by the highest quantized variance. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param betaMin The value represented by the lowest quantized offset. + * @param betaMax The value represented by the highest quantized offset. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param gammaMin The value represented by the lowest quantized gamma. 
+ * @param gammaMax The value represented by the highest quantized gamma. + * @param outType The value of the outType attribute + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @return a new instance of QuantizedBatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization + */ + public fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + outType: Class, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = + java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization + ) + + /** + * Adds Tensor 'bias' to Tensor 'input' for Quantized types. + * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + * + * @param data type for `output` output + * @param input The input value + * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minBias The float value that the lowest quantized bias value represents. + * @param maxBias The float value that the highest quantized bias value represents. 
+ * @param outType The value of the outType attribute + * @param data type for `QuantizedBiasAdd` output and operands + * @return a new instance of QuantizedBiasAdd + * @see org.tensorflow.op.NnOps.quantizedBiasAdd + */ + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand, + outType: Class + ): QuantizedBiasAdd = java.quantizedBiasAdd( + input, + bias, + minInput, + maxInput, + minBias, + maxBias, + outType + ) + + /** + * Computes a 2D convolution given quantized 4D input and filter tensors. + * The inputs are quantized tensors where the lowest value represents the real + * number of the associated minimum, and the highest represents the maximum. + * This means that you can only interpret the quantized output in the same way, by + * taking the returned minimum and maximum values into account. + * + * @param data type for `output` output + * @param input The input value + * @param filter filter's input_depth dimension must match input's depth dimensions. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minFilter The float value that the lowest quantized filter value represents. + * @param maxFilter The float value that the highest quantized filter value represents. + * @param outType The value of the outType attribute + * @param strides The stride of the sliding window for each dimension of the input + * tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `QuantizedConv2D` output and operands + * @return a new instance of QuantizedConv2d + * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. 
If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + public fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + outType: Class, + strides: List, + padding: String, + dilations: List? = null + ): QuantizedConv2d = java.quantizedConv2d( + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + outType, + strides, + padding, + *listOfNotNull( + dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + ).toTypedArray() + ) + + /** + * Quantized Instance normalization. + * + * @param data type for `y` output + * @param x A 4D input Tensor. + * @param xMin The value represented by the lowest quantized input. + * @param xMax The value represented by the highest quantized input. + * @param options carries optional attribute values + * @param data type for `QuantizedInstanceNorm` output and operands + * @return a new instance of QuantizedInstanceNorm + * @see org.tensorflow.op.NnOps.quantizedInstanceNorm + * @param outputRangeGiven Sets the outputRangeGiven option. + * + * @param outputRangeGiven If True, `given_y_min` and `given_y_min` + * and `given_y_max` are used as the output range. Otherwise, + * the implementation computes the output range. + * @return this Options instance. + * @param givenYMin Sets the givenYMin option. + * + * @param givenYMin Output in `y_min` if `output_range_given` is True. + * @return this Options instance. + * @param givenYMax Sets the givenYMax option. + * + * @param givenYMax Output in `y_max` if `output_range_given` is True. + * @return this Options instance. + * @param varianceEpsilon Sets the varianceEpsilon option. 
+ * + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @return this Options instance. + * @param minSeparation Sets the minSeparation option. + * + * @param minSeparation Minimum value of `y_max - y_min` + * @return this Options instance. + */ + public fun quantizedInstanceNorm( + x: Operand, + xMin: Operand, + xMax: Operand, + outputRangeGiven: Boolean? = null, + givenYMin: Float? = null, + givenYMax: Float? = null, + varianceEpsilon: Float? = null, + minSeparation: Float? = null + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + x, + xMin, + xMax, + *listOfNotNull( + outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + ).toTypedArray() + ) + + /** + * Produces the max pool of the input tensor for quantized types. + * + * @param data type for `output` output + * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param ksize The size of the window for each dimension of the input tensor. + * The length must be 4 to match the number of dimensions of the input. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. The length must be 4 to match the number of dimensions of the input. + * @param padding The type of padding algorithm to use. 
+ * @param data type for `QuantizedMaxPool` output and operands + * @return a new instance of QuantizedMaxPool + * @see org.tensorflow.op.NnOps.quantizedMaxPool + */ + public fun quantizedMaxPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedMaxPool = java.quantizedMaxPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + /** + * Computes Quantized Rectified Linear: `max(features, 0)` + * + * @param data type for `activations` output + * @param features The features value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType The value of the outType attribute + * @param data type for `QuantizedRelu` output and operands + * @return a new instance of QuantizedRelu + * @see org.tensorflow.op.NnOps.quantizedRelu + */ + public fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: Class + ): QuantizedRelu = java.quantizedRelu( + features, + minFeatures, + maxFeatures, + outType + ) + + /** + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * + * @param data type for `activations` output + * @param features The features value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. 
+ * @param outType The value of the outType attribute + * @param data type for `QuantizedRelu6` output and operands + * @return a new instance of QuantizedRelu6 + * @see org.tensorflow.op.NnOps.quantizedRelu6 + */ + public fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: Class + ): QuantizedRelu6 = java.quantizedRelu6( + features, + minFeatures, + maxFeatures, + outType + ) + + /** + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * + * @param data type for `activations` output + * @param features The features value + * @param maxValue The maxValue value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType The value of the outType attribute + * @param data type for `QuantizedReluX` output and operands + * @return a new instance of QuantizedReluX + * @see org.tensorflow.op.NnOps.quantizedReluX + */ + public fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: Class + ): QuantizedReluX = java.quantizedReluX( + features, + maxValue, + minFeatures, + maxFeatures, + outType + ) + + /** + * Computes rectified linear: `max(features, 0)`. + * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) + * Example usage: + * ``` + * + * tf.nn.relu([-2., 0., 3.]).numpy() + * array([0., 0., 3.], dtype=float32) + * ``` + * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Relu` output and operands + * @return a new instance of Relu + * @see org.tensorflow.op.NnOps.relu + */ + public fun relu(features: Operand): Relu = java.relu( + features + ) + + /** + * Computes rectified linear 6: `min(max(features, 0), 6)`. 
+ * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Relu6` output and operands + * @return a new instance of Relu6 + * @see org.tensorflow.op.NnOps.relu6 + */ + public fun relu6(features: Operand): Relu6 = java.relu6( + features + ) + + /** + * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + * if < 0, `scale * features` otherwise. + * + * To be used together with + * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + * For correct dropout, use `tf.contrib.nn.alpha_dropout`. + * + * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Selu` output and operands + * @return a new instance of Selu + * @see org.tensorflow.op.NnOps.selu + */ + public fun selu(features: Operand): Selu = java.selu( + features + ) + + /** + * Computes softmax activations. + * For each batch `i` and class `j` we have + * ``` + * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + * + * ``` + * + * @param data type for `softmax` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param data type for `Softmax` output and operands + * @return a new instance of Softmax + * @see org.tensorflow.op.NnOps.softmax + */ + public fun softmax(logits: Operand): Softmax = java.softmax( + logits + ) + + /** + * Computes softmax cross entropy cost and gradients to backpropagate. + * Inputs are the logits, not probabilities. + * + * @param data type for `loss` output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. 
+ * @param data type for `SoftmaxCrossEntropyWithLogits` output and operands + * @return a new instance of SoftmaxCrossEntropyWithLogits + * @see org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits + */ + public fun softmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels + ) + + /** + * Computes softsign: `features / (abs(features) + 1)`. + * + * @param data type for `activations` output + * @param features The features value + * @param data type for `Softsign` output and operands + * @return a new instance of Softsign + * @see org.tensorflow.op.NnOps.softsign + */ + public fun softsign(features: Operand): Softsign = java.softsign( + features + ) + + /** + * SpaceToBatch for 4-D tensors of type T. + * This is a legacy version of the more general SpaceToBatchND. + * + * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + * More specifically, this op outputs a copy of the input tensor where values from + * the `height` and `width` dimensions are moved to the `batch` dimension. After + * the zero-padding, both `height` and `width` of the input must be divisible by the + * block size. + * + * The attr `block_size` must be greater than one. It indicates the block size. + *
    + *
  • Non-overlapping blocks of size `block_size x block size` in the height and + * width dimensions are rearranged into the batch dimension at each location.
  • + *
  • The batch of the output tensor is `batch * block_size * block_size`.
  • + *
  • Both height_pad and width_pad must be divisible by block_size.
  • + *
+ * + * The shape of the output will be: + * ``` + * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + * depth] + * + * ``` + * + * Some examples: + * + * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * + * ``` + * + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ``` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * + * ``` + * + * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ``` + * + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ``` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * + * ``` + * + * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ``` + * + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * + * ``` + * + * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ``` + * + * The output tensor has shape `[8, 1, 2, 1]` and value: + * ``` + * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + * + * ``` + * + * Among others, this operation is useful for reducing atrous convolution into + * regular convolution. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, height, width, depth]`. + * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies + * the padding of the input with zeros across the spatial dimensions as follows: + * ` + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + * + * ` + * + * The effective spatial dimensions of the zero-padded input tensor will be: + * ` + * height_pad = pad_top + height + pad_bottom + * width_pad = pad_left + width + pad_right + * + * ` + * @param blockSize The value of the blockSize attribute + * @param data type for `SpaceToBatch` output and operands + * @return a new instance of SpaceToBatch + * @see org.tensorflow.op.NnOps.spaceToBatch + */ + public fun spaceToBatch( + input: Operand, + paddings: Operand, + blockSize: Long + ): SpaceToBatch = java.spaceToBatch( + input, + paddings, + blockSize + ) + + /** + * SpaceToDepth for tensors of type T. + * Rearranges blocks of spatial data, into depth. More specifically, + * this op outputs a copy of the input tensor where values from the `height` + * and `width` dimensions are moved to the `depth` dimension. + * The attr `block_size` indicates the input block size. + *
    + *
  • Non-overlapping blocks of size `block_size x block size` are rearranged + * into depth at each location.
  • + *
  • The depth of the output tensor is `block_size * block_size * input_depth`.
  • + *
  • The Y, X coordinates within each block of the input become the high order + * component of the output channel index.
  • + *
  • The input tensor's height and width must be divisible by block_size.
  • + *
+ * + * The `data_format` attr specifies the layout of the input and output tensors + * with the following options: + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. + * e.g. for data_format = NHWC, + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + * within the output image, bX, bY means coordinates + * within the input block, iC means input channels). + * The output would be a transpose to the following layout: + * n,oY,oX,bY,bX,iC + * + * This operation is useful for resizing the activations between convolutions + * (but keeping all data), e.g. instead of pooling. It is also useful for training + * purely convolutional models. + * + * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" + * and + * block_size = 2: + * ``` + * x = [[[[1], [2]], + * [[3], [4]]]] + * + * ``` + * + * This operation will output a tensor of shape `[1, 1, 1, 4]`: + * ``` + * [[[[1, 2, 3, 4]]]] + * + * ``` + * + * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + * the corresponding output will have a single element (i.e. width and height are + * both 1) and will have a depth of 4 channels (1 * block_size * block_size). + * The output element shape is `[1, 1, 4]`. + * + * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
+ * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ``` + * + * This operation, for block_size of 2, will return the following tensor of shape + * `[1, 1, 1, 12]` + * ``` + * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * + * ``` + * + * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + * ``` + * x = [[[[1], [2], [5], [6]], + * [[3], [4], [7], [8]], + * [[9], [10], [13], [14]], + * [[11], [12], [15], [16]]]] + * + * ``` + * + * the operator will return the following tensor of shape `[1 2 2 4]`: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param blockSize The size of the spatial block. + * @param options carries optional attribute values + * @param data type for `SpaceToDepth` output and operands + * @return a new instance of SpaceToDepth + * @see org.tensorflow.op.NnOps.spaceToDepth + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. + */ + public fun spaceToDepth( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): SpaceToDepth = java.spaceToDepth( + input, + blockSize, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + ).toTypedArray() + ) + + /** + * Computes softmax cross entropy cost and gradients to backpropagate. + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * a matrix of label probabilities, but rather a single label per row + * of features. This label is considered to have probability 1.0 for the + * given row. + * + * Inputs are the logits, not probabilities. + * + * @param data type for `loss` output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. 
+ * @param data type for `SparseSoftmaxCrossEntropyWithLogits` output and operands + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits + * @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits + */ + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels + ) + + /** + * Finds values and indices of the `k` largest elements for the last dimension. + * If the input is a vector (rank-1), finds the `k` largest entries in the vector + * and outputs their values and indices as vectors. Thus `values[j]` is the + * `j`-th largest entry in `input`, and its index is `indices[j]`. + * + * For matrices (resp. higher rank input), computes the top `k` entries in each + * row (resp. vector along the last dimension). Thus, + * ``` + * values.shape = indices.shape = input.shape[:-1] + [k] + * + * ``` + * + * If two elements are equal, the lower-index element appears first. + * + * @param data type for `values` output + * @param input 1-D or higher with last dimension at least `k`. + * @param k 0-D. Number of top elements to look for along the last dimension (along each + * row for matrices). + * @param options carries optional attribute values + * @param data type for `TopKV2` output and operands + * @return a new instance of TopK + * @see org.tensorflow.op.NnOps.topK + * @param sorted Sets the sorted option. + * + * @param sorted If true the resulting `k` elements will be sorted by the values in + * descending order. + * @return this Options instance. + */ + public fun topK( + input: Operand, + k: Operand, + sorted: Boolean? = null + ): TopK = java.topK( + input, + k, + *listOfNotNull( + sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } + ).toTypedArray() + ) + + /** + * Computes size of weights that can be used by a Cudnn RNN model. + * Return the params size that can be used by the Cudnn RNN model. 
Subsequent + * weight allocation and initialization should use this size. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * params_size: The size of the params buffer that should be allocated and + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param data type for `params_size` output + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param T The value of the T attribute + * @param S The value of the S attribute + * @param options carries optional attribute values + * @param data type for `CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands + * @return a new instance of CudnnRnnParamsSize + * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. 
+ * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. + */ + @JvmName("cudnnRnnParamsSizeReified") + public inline fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRnnParamsSize = cudnnRnnParamsSize(numLayers, numUnits, inputSize, + U::class.java, T::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, + numProj) + + /** + * Performs max pooling on the input and outputs both max values and indices. + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. 
+ * + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param Targmax The value of the Targmax attribute + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands + * @return a new instance of MaxPoolWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @return this Options instance. + */ + @JvmName("maxPoolWithArgmaxReified") + public inline fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolWithArgmax = maxPoolWithArgmax(input, ksize, strides, U::class.java, + padding, includeBatchInIndex) + + /** + * Quantized Batch normalization. + * This op is deprecated and will be removed in the future. Prefer + * `tf.nn.batch_normalization`. + * + * @param data type for `result` output + * @param t A 4D input Tensor. + * @param tMin The value represented by the lowest quantized input. + * @param tMax The value represented by the highest quantized input. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param mMin The value represented by the lowest quantized mean. + * @param mMax The value represented by the highest quantized mean. + * @param v A 1D variance Tensor with size matching the last dimension of t. 
+ * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param vMin The value represented by the lowest quantized variance. + * @param vMax The value represented by the highest quantized variance. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param betaMin The value represented by the lowest quantized offset. + * @param betaMax The value represented by the highest quantized offset. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param gammaMin The value represented by the lowest quantized gamma. + * @param gammaMax The value represented by the highest quantized gamma. + * @param outType The value of the outType attribute + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. 
+ * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @return a new instance of QuantizedBatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization + */ + @JvmName("quantizedBatchNormWithGlobalNormalizationReified") + public inline fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization(t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, + gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization) + + /** + * Adds Tensor 'bias' to Tensor 'input' for Quantized types. + * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + * + * @param data type for `output` output + * @param input The input value + * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minBias The float value that the lowest quantized bias value represents. + * @param maxBias The float value that the highest quantized bias value represents. 
+ * @param outType The value of the outType attribute + * @param data type for `QuantizedBiasAdd` output and operands + * @return a new instance of QuantizedBiasAdd + * @see org.tensorflow.op.NnOps.quantizedBiasAdd + */ + @JvmName("quantizedBiasAddReified") + public inline fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand + ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, maxBias, + V::class.java) + + /** + * Computes a 2D convolution given quantized 4D input and filter tensors. + * The inputs are quantized tensors where the lowest value represents the real + * number of the associated minimum, and the highest represents the maximum. + * This means that you can only interpret the quantized output in the same way, by + * taking the returned minimum and maximum values into account. + * + * @param data type for `output` output + * @param input The input value + * @param filter filter's input_depth dimension must match input's depth dimensions. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minFilter The float value that the lowest quantized filter value represents. + * @param maxFilter The float value that the highest quantized filter value represents. + * @param outType The value of the outType attribute + * @param strides The stride of the sliding window for each dimension of the input + * tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attribute values + * @param data type for `QuantizedConv2D` output and operands + * @return a new instance of QuantizedConv2d + * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations Sets the dilations option. + * + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. 
If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + * @return this Options instance. + */ + @JvmName("quantizedConv2dReified") + public inline fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + strides: List, + padding: String, + dilations: List? = null + ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, + maxFilter, V::class.java, strides, padding, dilations) + + /** + * Computes Quantized Rectified Linear: `max(features, 0)` + * + * @param data type for `activations` output + * @param features The features value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType The value of the outType attribute + * @param data type for `QuantizedRelu` output and operands + * @return a new instance of QuantizedRelu + * @see org.tensorflow.op.NnOps.quantizedRelu + */ + @JvmName("quantizedReluReified") + public inline fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) + + /** + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * + * @param data type for `activations` output + * @param features The features value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. 
+ * @param outType The value of the outType attribute + * @param data type for `QuantizedRelu6` output and operands + * @return a new instance of QuantizedRelu6 + * @see org.tensorflow.op.NnOps.quantizedRelu6 + */ + @JvmName("quantizedRelu6Reified") + public inline fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) + + /** + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * + * @param data type for `activations` output + * @param features The features value + * @param maxValue The maxValue value + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType The value of the outType attribute + * @param data type for `QuantizedReluX` output and operands + * @return a new instance of QuantizedReluX + * @see org.tensorflow.op.NnOps.quantizedReluX + */ + @JvmName("quantizedReluXReified") + public inline fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, + U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt new file mode 100644 index 00000000000..a614a1b8506 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -0,0 +1,1421 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.quantization.Dequantize +import org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs +import org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVars +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient +import org.tensorflow.op.quantization.Quantize +import org.tensorflow.op.quantization.QuantizeAndDequantize +import org.tensorflow.op.quantization.QuantizeAndDequantizeV3 +import org.tensorflow.op.quantization.QuantizeAndDequantizeV4 +import org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad +import org.tensorflow.op.quantization.QuantizeDownAndShrinkRange +import org.tensorflow.op.quantization.QuantizedConcat +import org.tensorflow.op.quantization.RequantizationRange +import org.tensorflow.op.quantization.Requantize +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building 
`quantization` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class QuantizationOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. + * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * + * ``` + * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. 
(Thus an input of 0 always maps to 0.0). + * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param options carries optional attribute values + * @return a new instance of Dequantize, with default output types + * @see org.tensorflow.op.QuantizationOps.dequantize + */ + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + options: Array + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + options + ) + + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. 
+ * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. + * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * + * ``` + * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? 
(max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. + * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. + * @param options carries optional attribute values + * @param data type for `Dequantize` output and operands + * @return a new instance of Dequantize + * @see org.tensorflow.op.QuantizationOps.dequantize + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + */ + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + dtype: Class, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + dtype, + *listOfNotNull( + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } + ).toTypedArray() + ) + + /** + * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + * Attributes + *
+ * <ul>
+ * <li>`[min; max]` define the clamping range for the `inputs` data.</li>
+ * <li>`inputs` values are quantized into the quantization range (
+ * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ * when it is true) and then de-quantized and output as floats in `[min; max]`
+ * interval.</li>
+ * <li>`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.</li>
+ * </ul>
+ * + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
+ * <ul>
+ * <li>If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.</li>
+ * <li>If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.</li>
+ * <li>If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.</li>
+ * </ul>
+ * + * Quantization is called fake since the output is still in floating point. + * + * @param inputs The inputs value + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxArgs + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgs + * @param min Sets the min option. + * + * @param min the min option + * @return this Options instance. + * @param max Sets the max option. + * + * @param max the max option + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + */ + public fun fakeQuantWithMinMaxArgs( + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + inputs, + *listOfNotNull( + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + ).toTypedArray() + ) + + /** + * Compute gradients for a FakeQuantWithMinMaxArgs operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxArgs operation. + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxArgsGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgsGradient + * @param min Sets the min option. + * + * @param min the min option + * @return this Options instance. + * @param max Sets the max option. 
+ * + * @param max the max option + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + */ + public fun fakeQuantWithMinMaxArgsGradient( + gradients: Operand, + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + gradients, + inputs, + *listOfNotNull( + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) } + ).toTypedArray() + ) + + /** + * Fake-quantize the 'inputs' tensor of type float via global float scalars + * Fake-quantize the `inputs` tensor of type float via global float scalars + * `min` and `max` to `outputs` tensor of same shape as `inputs`. + * + * Attributes + *
+ * <ul>
+ * <li>`[min; max]` define the clamping range for the `inputs` data.</li>
+ * <li>`inputs` values are quantized into the quantization range (
+ * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ * when it is true) and then de-quantized and output as floats in `[min; max]`
+ * interval.</li>
+ * <li>`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.</li>
+ * </ul>
+ * + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
+ * <ul>
+ * <li>If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.</li>
+ * <li>If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.</li>
+ * <li>If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.</li>
+ * </ul>
+ * + * This operation has a gradient and thus allows for training `min` and `max` + * values. + * + * @param inputs The inputs value + * @param min The min value + * @param max The max value + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxVars + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVars + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + */ + public fun fakeQuantWithMinMaxVars( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + ).toTypedArray() + ) + + /** + * Compute gradients for a FakeQuantWithMinMaxVars operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. + * min, max: Quantization interval, scalar floats. + * @param min The min value + * @param max The max value + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxVarsGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsGradient + * @param numBits Sets the numBits option. + * + * @param numBits The bitwidth of the quantization; between 2 and 8, inclusive. + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + * @return this Options instance. 
+ */ + public fun fakeQuantWithMinMaxVarsGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) } + ).toTypedArray() + ) + + /** + * Fake-quantize the 'inputs' tensor of type float via per-channel floats + * Fake-quantize the `inputs` tensor of type float per-channel and one of the + * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and + * `max` + * of shape `[d]` to `outputs` tensor of same shape as `inputs`. + * + * Attributes + *
+ * <ul>
+ * <li>`[min; max]` define the clamping range for the `inputs` data.</li>
+ * <li>`inputs` values are quantized into the quantization range (
+ * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ * when it is true) and then de-quantized and output as floats in `[min; max]`
+ * interval.</li>
+ * <li>`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.</li>
+ * </ul>
+ * + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
+ * <ul>
+ * <li>If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.</li>
+ * <li>If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.</li>
+ * <li>If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.</li>
+ * </ul>
+ * + * This operation has a gradient and thus allows for training `min` and `max` + * values. + * + * @param inputs The inputs value + * @param min The min value + * @param max The max value + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxVarsPerChannel + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannel + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + */ + public fun fakeQuantWithMinMaxVarsPerChannel( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } + ).toTypedArray() + ) + + /** + * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + * same as `gradients`. + * min, max: Quantization interval, floats of shape `[d]`. + * @param min The min value + * @param max The max value + * @param options carries optional attribute values + * @return a new instance of FakeQuantWithMinMaxVarsPerChannelGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannelGradient + * @param numBits Sets the numBits option. + * + * @param numBits The bitwidth of the quantization; between 2 and 16, inclusive. + * @return this Options instance. 
+ * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + * @return this Options instance. + */ + public fun fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) + } + ).toTypedArray() + ) + + /** + * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + * [min_range, max_range] are scalar floats that specify the range for + * the 'input' data. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. The + * 'round_mode' attribute controls which rounding tie-breaking algorithm is used + * when rounding float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). The min_range and max_range values should be + * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + * value of the input by 255/6 and cast to quint8. 
+ * + * If the output type was qint8 ([-128, 127]), the operation will additionally + * subtract each value by 128 prior to casting, so that the range of values aligns + * with the range of qint8. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = num_discrete_values / range + * quantized = round(input * range_scale) - round(range_min * range_scale) + + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) + * + * ``` + * + * The biggest difference between this and MIN_COMBINED is that the minimum range + * is rounded first, before it's subtracted from the rounded value. With + * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + * and dequantizing will introduce a larger and larger error. + * + * _SCALED mode Example_ + * + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3`}. + * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); + * + * const float scale_factor_from_min_side = + * (min_T * min_range > 0) ? min_T / min_range : max_float; + * const float scale_factor_from_max_side = + * (max_T * max_range > 0) ? 
max_T / max_range : max_float; + * + * const float scale_factor = std::min(scale_factor_from_min_side, + * scale_factor_from_max_side); + * + * ``` + * + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; + * max_range = max_T / scale_factor; + * + * ``` + * + * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + * In this case, min_range would remain -10, but max_range would be adjusted to + * 127 / 12.8 = 9.921875 + * + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). + * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) + * + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * this operation. These outputs should be used as the range for any further + * calculations. + * + * _narrow_range (bool) attribute_ + * + * If true, we do not use the minimum quantized value. + * i.e. for int8 the quantized output, it would be restricted to the range + * -127..127 instead of the full -128..127 range. + * This is provided for compatibility with certain inference backends. + * (Only applies to SCALED mode) + * + * _axis (int) attribute_ + * + * An optional `axis` attribute can specify a dimension index of the input tensor, + * such that quantization ranges will be calculated and applied separately for each + * slice of the tensor along that dimension. This is useful for per-channel + * quantization. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * _ensure_minimum_range (float) attribute_ + * + * Ensures the minimum quantization range is at least this value. 
+ * The legacy default value for this is 0.01, but it is strongly suggested to + * set it to 0 for new uses. + * + * @param data type for `output` output + * @param input The input value + * @param minRange The minimum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param maxRange The maximum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param T The value of the T attribute + * @param options carries optional attribute values + * @param data type for `QuantizeV2` output and operands + * @return a new instance of Quantize + * @see org.tensorflow.op.QuantizationOps.quantize + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode the roundMode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + * @param ensureMinimumRange Sets the ensureMinimumRange option. + * + * @param ensureMinimumRange the ensureMinimumRange option + * @return this Options instance. + */ + public fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + T_: Class, + mode: String? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null, + ensureMinimumRange: Float? 
= null + ): Quantize = java.quantize( + input, + minRange, + maxRange, + T_, + *listOfNotNull( + mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + ).toTypedArray() + ) + + /** + * Quantizes then dequantizes a tensor. + * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + * tensor, so its value can change during training. + * + * @param data type for `output` output + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value + * @param numBits The numBits value + * @param options carries optional attribute values + * @param data type for `QuantizeAndDequantizeV3` output and operands + * @return a new instance of QuantizeAndDequantize + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantize + * @param signedInput Sets the signedInput option. + * + * @param signedInput the signedInput option + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven the rangeGiven option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + */ + public fun quantizeAndDequantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + signedInput: Boolean? = null, + rangeGiven: Boolean? = null, + narrowRange: Boolean? = null, + axis: Long? 
= null + ): QuantizeAndDequantize = java.quantizeAndDequantize( + input, + inputMin, + inputMax, + numBits, + *listOfNotNull( + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + ).toTypedArray() + ) + + /** + * Quantizes then dequantizes a tensor. + * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + * tensor, so its value can change during training. + * + * @param data type for `output` output + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value + * @param numBits The numBits value + * @param options carries optional attribute values + * @param data type for `QuantizeAndDequantizeV3` output and operands + * @return a new instance of QuantizeAndDequantizeV3 + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV3 + * @param signedInput Sets the signedInput option. + * + * @param signedInput the signedInput option + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven the rangeGiven option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + */ + public fun quantizeAndDequantizeV3( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + signedInput: Boolean? = null, + rangeGiven: Boolean? = null, + narrowRange: Boolean? = null, + axis: Long? 
= null + ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( + input, + inputMin, + inputMax, + numBits, + *listOfNotNull( + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.axis(it) } + ).toTypedArray() + ) + + /** + * Quantizes then dequantizes a tensor. + * This is almost identical to QuantizeAndDequantizeV2, except that it returns a + * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. + * + * @param data type for `output` output + * @param input Tensor to quantize and then dequantize. + * @param inputMin If `range_given == True`, this specifies the minimum input value that needs + * to + * be represented, otherwise it is determined from the min value of the `input` + * tensor. + * @param inputMax If `range_given == True`, this specifies the maximum input value that needs + * to + * be represented, otherwise it is determined from the max value of the `input` + * tensor. + * @param options carries optional attribute values + * @param data type for `QuantizeAndDequantizeV4` output and operands + * @return a new instance of QuantizeAndDequantizeV4 + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4 + * @param signedInput Sets the signedInput option. + * + * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter + * should + * have been called **`signed_output`**) + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits The bitwidth of the quantization. + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven Whether the range is given or should be determined from the `input` tensor. 
+ * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode The 'round_mode' attribute controls which rounding tie-breaking algorithm is + * used when rounding float values to their quantized equivalents. The following + * rounding modes are currently supported: + *
    + *
  • HALF_TO_EVEN: this is the default round_mode.
  • + *
  • HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + * rounds up to -7.
  • + *
+ * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange If True, then the absolute value of the quantized minimum value is the + * same as + * the quantized maximum value, instead of 1 greater. + * i.e. for 8 bit quantization, the minimum value is -127 instead of -128. + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis If specified, this axis is treated as a channel or slice axis, and a separate + * quantization range is used for each channel or slice along this axis. + * @return this Options instance. + */ + public fun quantizeAndDequantizeV4( + input: Operand, + inputMin: Operand, + inputMax: Operand, + signedInput: Boolean? = null, + numBits: Long? = null, + rangeGiven: Boolean? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( + input, + inputMin, + inputMax, + *listOfNotNull( + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.signedInput(it) }, + numBits?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.numBits(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.rangeGiven(it) }, + roundMode?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.axis(it) } + ).toTypedArray() + ) + + /** + * Returns the gradient of `QuantizeAndDequantizeV4`. + * Returns a gradient of 1 for inputs that are within the quantization range, + * or 0 otherwise. 
+ * + * @param data type for `input_backprop` output + * @param gradients The gradients value + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value + * @param options carries optional attribute values + * @param data type for `QuantizeAndDequantizeV4Grad` output and operands + * @return a new instance of QuantizeAndDequantizeV4Grad + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4Grad + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + */ + public fun quantizeAndDequantizeV4Grad( + gradients: Operand, + input: Operand, + inputMin: Operand, + inputMax: Operand, + axis: Long? = null + ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( + gradients, + input, + inputMin, + inputMax, + *listOfNotNull( + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad.axis(it) } + ).toTypedArray() + ) + + /** + * Convert the quantized 'input' tensor into a lower-precision 'output', using the + * actual distribution of the values to maximize the usage of the lower bit depth + * and adjusting the output min and max ranges accordingly. + * + * [input_min, input_max] are scalar floats that specify the range for the float + * interpretation of the 'input' data. For example, if input_min is -1.0f and + * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * This operator tries to squeeze as much precision as possible into an output with + * a lower bit depth by calculating the actual min and max values found in the + * data. For example, maybe that quint16 input has no values lower than 16,384 and + * none higher than 49,152. 
That means only half the range is actually needed, all + * the float interpretations are between -0.5f and 0.5f, so if we want to compress + * the data into a quint8 output, we can use that range rather than the theoretical + * -1.0f to 1.0f that is suggested by the input min and max. + * + * In practice, this is most useful for taking output from operations like + * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + * may have large potential output ranges, but in practice have a distribution of + * input values that only uses a small fraction of the possible range. By feeding + * that output into this operator, we can reduce it from 32 bits down to 8 with + * minimal loss of accuracy. + * + * @param data type for `output` output + * @param input The input value + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param data type for `QuantizeDownAndShrinkRange` output and operands + * @return a new instance of QuantizeDownAndShrinkRange + * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange + */ + public fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand, + outType: Class + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + input, + inputMin, + inputMax, + outType + ) + + /** + * Concatenates quantized tensors along one dimension. + * + * @param data type for `output` output + * @param concatDim 0-D. The dimension along which to concatenate. Must be in the + * range [0, rank(values)). + * @param values The `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. + * @param inputMins The minimum scalar values for each of the input tensors. 
+ * @param inputMaxes The maximum scalar values for each of the input tensors. + * @param data type for `QuantizedConcat` output and operands + * @return a new instance of QuantizedConcat + * @see org.tensorflow.op.QuantizationOps.quantizedConcat + */ + public fun quantizedConcat( + concatDim: Operand, + values: Iterable>, + inputMins: Iterable>, + inputMaxes: Iterable> + ): QuantizedConcat = java.quantizedConcat( + concatDim, + values, + inputMins, + inputMaxes + ) + + /** + * Computes a range that covers the actual values present in a quantized tensor. + * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + * range that covers the actual values present in that tensor. This op is typically + * used to produce the `requested_output_min` and `requested_output_max` for + * `Requantize`. + * + * @param input The input value + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @return a new instance of RequantizationRange + * @see org.tensorflow.op.QuantizationOps.requantizationRange + */ + public fun requantizationRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): RequantizationRange = java.requantizationRange( + input, + inputMin, + inputMax + ) + + /** + * Converts the quantized `input` tensor into a lower-precision `output`. + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. 
+ * + * @param data type for `output` output + * @param input The input value + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param data type for `Requantize` output and operands + * @return a new instance of Requantize + * @see org.tensorflow.op.QuantizationOps.requantize + */ + public fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + requestedOutputMax: Operand, + outType: Class + ): Requantize = java.requantize( + input, + inputMin, + inputMax, + requestedOutputMin, + requestedOutputMax, + outType + ) + + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. 
+ * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * + * ``` + * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. + * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. + * @param options carries optional attribute values + * @param data type for `Dequantize` output and operands + * @return a new instance of Dequantize + * @see org.tensorflow.op.QuantizationOps.dequantize + * @param mode Sets the mode option. 
+ * + * @param mode the mode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + */ + @JvmName("dequantizeReified") + public inline fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, + axis) + + /** + * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + * [min_range, max_range] are scalar floats that specify the range for + * the 'input' data. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. The + * 'round_mode' attribute controls which rounding tie-breaking algorithm is used + * when rounding float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). The min_range and max_range values should be + * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + * value of the input by 255/6 and cast to quint8. + * + * If the output type was qint8 ([-128, 127]), the operation will additionally + * subtract each value by 128 prior to casting, so that the range of values aligns + * with the range of qint8. 
+ * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = num_discrete_values / range + * quantized = round(input * range_scale) - round(range_min * range_scale) + + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) + * + * ``` + * + * The biggest difference between this and MIN_COMBINED is that the minimum range + * is rounded first, before it's subtracted from the rounded value. With + * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + * and dequantizing will introduce a larger and larger error. + * + * _SCALED mode Example_ + * + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3`}. + * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); + * + * const float scale_factor_from_min_side = + * (min_T * min_range > 0) ? min_T / min_range : max_float; + * const float scale_factor_from_max_side = + * (max_T * max_range > 0) ? max_T / max_range : max_float; + * + * const float scale_factor = std::min(scale_factor_from_min_side, + * scale_factor_from_max_side); + * + * ``` + * + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; + * max_range = max_T / scale_factor; + * + * ``` + * + * e.g. 
if T = qint8, and initially min_range = -10, and max_range = 9, we would + * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + * In this case, min_range would remain -10, but max_range would be adjusted to + * 127 / 12.8 = 9.921875 + * + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). + * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) + * + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * this operation. These outputs should be used as the range for any further + * calculations. + * + * _narrow_range (bool) attribute_ + * + * If true, we do not use the minimum quantized value. + * i.e. for int8 the quantized output, it would be restricted to the range + * -127..127 instead of the full -128..127 range. + * This is provided for compatibility with certain inference backends. + * (Only applies to SCALED mode) + * + * _axis (int) attribute_ + * + * An optional `axis` attribute can specify a dimension index of the input tensor, + * such that quantization ranges will be calculated and applied separately for each + * slice of the tensor along that dimension. This is useful for per-channel + * quantization. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * _ensure_minimum_range (float) attribute_ + * + * Ensures the minimum quantization range is at least this value. + * The legacy default value for this is 0.01, but it is strongly suggested to + * set it to 0 for new uses. + * + * @param data type for `output` output + * @param input The input value + * @param minRange The minimum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. 
The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param maxRange The maximum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param T The value of the T attribute + * @param options carries optional attribute values + * @param data type for `QuantizeV2` output and operands + * @return a new instance of Quantize + * @see org.tensorflow.op.QuantizationOps.quantize + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode the roundMode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + * @param ensureMinimumRange Sets the ensureMinimumRange option. + * + * @param ensureMinimumRange the ensureMinimumRange option + * @return this Options instance. + */ + @JvmName("quantizeReified") + public inline fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null, + ensureMinimumRange: Float? 
= null + ): Quantize = quantize(input, minRange, maxRange, T::class.java, mode, roundMode, + narrowRange, axis, ensureMinimumRange) + + /** + * Convert the quantized 'input' tensor into a lower-precision 'output', using the + * actual distribution of the values to maximize the usage of the lower bit depth + * and adjusting the output min and max ranges accordingly. + * + * [input_min, input_max] are scalar floats that specify the range for the float + * interpretation of the 'input' data. For example, if input_min is -1.0f and + * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * This operator tries to squeeze as much precision as possible into an output with + * a lower bit depth by calculating the actual min and max values found in the + * data. For example, maybe that quint16 input has no values lower than 16,384 and + * none higher than 49,152. That means only half the range is actually needed, all + * the float interpretations are between -0.5f and 0.5f, so if we want to compress + * the data into a quint8 output, we can use that range rather than the theoretical + * -1.0f to 1.0f that is suggested by the input min and max. + * + * In practice, this is most useful for taking output from operations like + * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + * may have large potential output ranges, but in practice have a distribution of + * input values that only uses a small fraction of the possible range. By feeding + * that output into this operator, we can reduce it from 32 bits down to 8 with + * minimal loss of accuracy. + * + * @param data type for `output` output + * @param input The input value + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. 
+ * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param data type for `QuantizeDownAndShrinkRange` output and operands + * @return a new instance of QuantizeDownAndShrinkRange + * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange + */ + @JvmName("quantizeDownAndShrinkRangeReified") + public inline fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, + U::class.java) + + /** + * Converts the quantized `input` tensor into a lower-precision `output`. + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * @param data type for `output` output + * @param input The input value + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. 
+ * @param data type for `Requantize` output and operands + * @return a new instance of Requantize + * @see org.tensorflow.op.QuantizationOps.requantize + */ + @JvmName("requantizeReified") + public inline fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + requestedOutputMax: Operand + ): Requantize = requantize(input, inputMin, inputMax, requestedOutputMin, + requestedOutputMax, U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt new file mode 100644 index 00000000000..f3ffaab2af5 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -0,0 +1,88 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.ragged.RaggedBincount +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber + +/** + * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class RaggedOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.RaggedOps = ops.java.ragged + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Counts the number of occurrences of each value in an integer array. + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `output` output + * @param splits 1D int64 `Tensor`. + * @param values 2D int `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. + * @param options carries optional attribute values + * @param data type for `RaggedBincount` output and operands + * @param data type for `RaggedBincount` output and operands + * @return a new instance of RaggedBincount + * @see org.tensorflow.op.RaggedOps.raggedBincount + * @param binaryOutput Sets the binaryOutput option. + * + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + * @return this Options instance. 
+ */ + public fun raggedBincount( + splits: Operand, + values: Operand, + sizeOutput: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): RaggedBincount = java.raggedBincount( + splits, + values, + sizeOutput, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + ).toTypedArray() + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt new file mode 100644 index 00000000000..363f89fc260 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -0,0 +1,1281 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.random.AllCandidateSampler +import org.tensorflow.op.random.LogUniformCandidateSampler +import org.tensorflow.op.random.Multinomial +import org.tensorflow.op.random.ParameterizedTruncatedNormal +import org.tensorflow.op.random.RandomGamma +import org.tensorflow.op.random.RandomPoisson +import org.tensorflow.op.random.RandomShuffle +import org.tensorflow.op.random.RandomStandardNormal +import org.tensorflow.op.random.RandomUniform +import org.tensorflow.op.random.RandomUniformInt +import org.tensorflow.op.random.RecordInput +import org.tensorflow.op.random.StatefulRandomBinomial +import org.tensorflow.op.random.StatefulStandardNormal +import org.tensorflow.op.random.StatelessMultinomial +import org.tensorflow.op.random.StatelessRandomNormal +import org.tensorflow.op.random.StatelessRandomUniform +import org.tensorflow.op.random.StatelessTruncatedNormal +import org.tensorflow.op.random.TruncatedNormal +import org.tensorflow.op.random.UniformCandidateSampler +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `random` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class RandomOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.RandomOps = ops.java.random + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Generates labels for candidate sampling with a learned unigram distribution. 
+ * See explanations of candidate sampling and the data formats at
+ * go/candidate-sampling.
+ *
+ * For each batch, this op picks a single set of sampled candidate labels.
+ *
+ * The advantages of sampling candidates per-batch are simplicity and the
+ * possibility of efficient dense matrix multiplication. The disadvantage is that
+ * the sampled candidates must be chosen independently of the context and of the
+ * true labels.
+ *
+ * @param trueClasses A batch_size * num_true matrix, in which each row contains the
+ * IDs of the num_true target_classes in the corresponding original label.
+ * @param numTrue Number of true labels per context.
+ * @param numSampled Number of candidates to produce.
+ * @param unique If unique is true, we sample with rejection, so that all sampled
+ * candidates in a batch are unique. This requires some approximation to
+ * estimate the post-rejection sampling probabilities.
+ * @param options carries optional attribute values
+ * @return a new instance of AllCandidateSampler
+ * @see org.tensorflow.op.RandomOps.allCandidateSampler
+ * @param seed Sets the seed option.
+ *
+ * @param seed If either seed or seed2 are set to be non-zero, the random number
+ * generator is seeded by the given seed. Otherwise, it is seeded by a
+ * random seed.
+ * @return this Options instance.
+ * @param seed2 Sets the seed2 option.
+ *
+ * @param seed2 A second seed to avoid seed collision.
+ * @return this Options instance.
+ */
+ public fun allCandidateSampler(
+ trueClasses: Operand,
+ numTrue: Long,
+ numSampled: Long,
+ unique: Boolean,
+ seed: Long? = null,
+ seed2: Long? = null
+ ): AllCandidateSampler = java.allCandidateSampler(
+ trueClasses,
+ numTrue,
+ numSampled,
+ unique,
+ *listOfNotNull(
+ seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) },
+ seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) }
+ ).toTypedArray()
+ )
+
+ /**
+ * Generates labels for candidate sampling with a log-uniform distribution.
+ * See explanations of candidate sampling and the data formats at
+ * go/candidate-sampling.
+ *
+ * For each batch, this op picks a single set of sampled candidate labels.
+ *
+ * The advantages of sampling candidates per-batch are simplicity and the
+ * possibility of efficient dense matrix multiplication. The disadvantage is that
+ * the sampled candidates must be chosen independently of the context and of the
+ * true labels.
+ *
+ * @param trueClasses A batch_size * num_true matrix, in which each row contains the
+ * IDs of the num_true target_classes in the corresponding original label.
+ * @param numTrue Number of true labels per context.
+ * @param numSampled Number of candidates to randomly sample.
+ * @param unique If unique is true, we sample with rejection, so that all sampled
+ * candidates in a batch are unique. This requires some approximation to
+ * estimate the post-rejection sampling probabilities.
+ * @param rangeMax The sampler will sample integers from the interval [0, range_max).
+ * @param options carries optional attribute values
+ * @return a new instance of LogUniformCandidateSampler
+ * @see org.tensorflow.op.RandomOps.logUniformCandidateSampler
+ * @param seed Sets the seed option.
+ *
+ * @param seed If either seed or seed2 are set to be non-zero, the random number
+ * generator is seeded by the given seed. Otherwise, it is seeded by a
+ * random seed.
+ * @return this Options instance.
+ * @param seed2 Sets the seed2 option.
+ *
+ * @param seed2 A second seed to avoid seed collision.
+ * @return this Options instance.
+ */
+ public fun logUniformCandidateSampler(
+ trueClasses: Operand,
+ numTrue: Long,
+ numSampled: Long,
+ unique: Boolean,
+ rangeMax: Long,
+ seed: Long? = null,
+ seed2: Long?
= null + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param options carries optional attribute values + * @return a new instance of Multinomial, with default output types + * @see org.tensorflow.op.RandomOps.multinomial + */ + public fun multinomial( + logits: Operand, + numSamples: Operand, + options: Array + ): Multinomial = java.multinomial( + logits, + numSamples, + options + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param outputDtype The value of the outputDtype attribute + * @param options carries optional attribute values + * @param data type for `Multinomial` output and operands + * @return a new instance of Multinomial + * @see org.tensorflow.op.RandomOps.multinomial + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 is set to be non-zero, the internal random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. 
+ * @return this Options instance. + */ + public fun multinomial( + logits: Operand, + numSamples: Operand, + outputDtype: Class, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = java.multinomial( + logits, + numSamples, + outputDtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random values from a normal distribution. The parameters may each be a + * scalar which applies to the entire output, or a vector of length shape[0] which + * stores the parameters for each batch. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. + * @param means The mean parameter of each batch. + * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. + * @param minvals The minimum cutoff. May be -infinity. + * @param maxvals The maximum cutoff. May be +infinity, and must be more than the minval + * for each batch. + * @param options carries optional attribute values + * @param data type for `ParameterizedTruncatedNormal` output and operands + * @return a new instance of ParameterizedTruncatedNormal + * @see org.tensorflow.op.RandomOps.parameterizedTruncatedNormal + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun parameterizedTruncatedNormal( + shape: Operand, + means: Operand, + stdevs: Operand, + minvals: Operand, + maxvals: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + shape, + means, + stdevs, + minvals, + maxvals, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random values from the Gamma distribution(s) described by alpha. + * This op uses the algorithm by Marsaglia et al. to acquire samples via + * transformation-rejection from pairs of uniform and normal random variables. + * See http://dl.acm.org/citation.cfm?id=358414 + * + * @param data type for `output` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in alpha. + * @param alpha A tensor in which each scalar is a "shape" parameter describing the + * associated gamma distribution. + * @param options carries optional attribute values + * @param data type for `RandomGamma` output and operands + * @return a new instance of RandomGamma + * @see org.tensorflow.op.RandomOps.randomGamma + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomGamma( + shape: Operand, + alpha: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomGamma = java.randomGamma( + shape, + alpha, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * This op uses two algorithms, depending on rate. 
If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. + * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param data type for `output` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. + * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. + * @param options carries optional attribute values + * @return a new instance of RandomPoisson, with default output types + * @see org.tensorflow.op.RandomOps.randomPoisson + */ + public fun randomPoisson( + shape: Operand, + rate: Operand, + options: Array + ): RandomPoisson = java.randomPoisson( + shape, + rate, + options + ) + + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * This op uses two algorithms, depending on rate. If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. + * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param data type for `output` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. + * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. 
+ * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `RandomPoissonV2` output and operands + * @return a new instance of RandomPoisson + * @see org.tensorflow.op.RandomOps.randomPoisson + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomPoisson( + shape: Operand, + rate: Operand, + dtype: Class, + seed: Long? = null, + seed2: Long? = null + ): RandomPoisson = java.randomPoisson( + shape, + rate, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } + ).toTypedArray() + ) + + /** + * Randomly shuffles a tensor along its first dimension. + * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + * to one and only one `output[i]`. For example, a mapping that might occur for a + * 3x2 tensor is: + * ``` + * [[1, 2], [[5, 6], + * [3, 4], ==> [1, 2], + * [5, 6]] [3, 4]] + * + * ``` + * + * @param data type for `output` output + * @param value The tensor to be shuffled. + * @param options carries optional attribute values + * @param data type for `RandomShuffle` output and operands + * @return a new instance of RandomShuffle + * @see org.tensorflow.op.RandomOps.randomShuffle + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. 
+ * @return this Options instance. + */ + public fun randomShuffle( + value: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomShuffle = java.randomShuffle( + value, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attribute values + * @param data type for `RandomStandardNormal` output and operands + * @return a new instance of RandomStandardNormal + * @see org.tensorflow.op.RandomOps.randomStandardNormal + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomStandardNormal( + shape: Operand, + dtype: Class, + seed: Long? = null, + seed2: Long? = null + ): RandomStandardNormal = java.randomStandardNormal( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random values from a uniform distribution. + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. 
+ * @param options carries optional attribute values + * @param data type for `RandomUniform` output and operands + * @return a new instance of RandomUniform + * @see org.tensorflow.op.RandomOps.randomUniform + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomUniform( + shape: Operand, + dtype: Class, + seed: Long? = null, + seed2: Long? = null + ): RandomUniform = java.randomUniform( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } + ).toTypedArray() + ) + + /** + * Outputs random integers from a uniform distribution. + * The generated values are uniform integers in the range `[minval, maxval)`. + * The lower bound `minval` is included in the range, while the upper bound + * `maxval` is excluded. + * + * The random integers are slightly biased unless `maxval - minval` is an exact + * power of two. The bias is small for values of `maxval - minval` significantly + * smaller than the range of the output (either `2^32` or `2^64`). + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param minval 0-D. Inclusive lower bound on the generated integers. + * @param maxval 0-D. Exclusive upper bound on the generated integers. + * @param options carries optional attribute values + * @param data type for `RandomUniformInt` output and operands + * @return a new instance of RandomUniformInt + * @see org.tensorflow.op.RandomOps.randomUniformInt + * @param seed Sets the seed option. 
+ * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun randomUniformInt( + shape: Operand, + minval: Operand, + maxval: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomUniformInt = java.randomUniformInt( + shape, + minval, + maxval, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } + ).toTypedArray() + ) + + /** + * Emits randomized records. + * + * @param filePattern Glob pattern for the data files. + * @param options carries optional attribute values + * @return a new instance of RecordInput + * @see org.tensorflow.op.RandomOps.recordInput + * @param fileRandomSeed Sets the fileRandomSeed option. + * + * @param fileRandomSeed Random seeds used to produce randomized records. + * @return this Options instance. + * @param fileShuffleShiftRatio Sets the fileShuffleShiftRatio option. + * + * @param fileShuffleShiftRatio Shifts the list of files after the list is randomly + * shuffled. + * @return this Options instance. + * @param fileBufferSize Sets the fileBufferSize option. + * + * @param fileBufferSize The randomization shuffling buffer. + * @return this Options instance. + * @param fileParallelism Sets the fileParallelism option. + * + * @param fileParallelism How many sstables are opened and concurrently iterated over. + * @return this Options instance. + * @param batchSize Sets the batchSize option. + * + * @param batchSize The batch size. + * @return this Options instance. + * @param compressionType Sets the compressionType option. + * + * @param compressionType The type of compression for the file. 
Currently ZLIB and + * GZIP are supported. Defaults to none. + * @return this Options instance. + */ + public fun recordInput( + filePattern: String, + fileRandomSeed: Long? = null, + fileShuffleShiftRatio: Float? = null, + fileBufferSize: Long? = null, + fileParallelism: Long? = null, + batchSize: Long? = null, + compressionType: String? = null + ): RecordInput = java.recordInput( + filePattern, + *listOfNotNull( + fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let{ org.tensorflow.op.random.RecordInput.compressionType(it) } + ).toTypedArray() + ) + + /** + * The StatefulRandomBinomial operation + * + * @param data type for `output` output + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value + * @param data type for `StatefulRandomBinomial` output and operands + * @return a new instance of StatefulRandomBinomial, with default output types + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ + public fun statefulRandomBinomial( + resource: Operand, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs + ) + + /** + * The StatefulRandomBinomial operation + * + * @param data type for `output` output + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value + * @param dtype The value of the dtype 
attribute + * @param data type for `StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands + * @return a new instance of StatefulRandomBinomial + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ + public fun statefulRandomBinomial( + resource: Operand, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand, + dtype: Class + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs, + dtype + ) + + /** + * Outputs random values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * @param data type for `output` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @return a new instance of StatefulStandardNormal, with default output types + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ + public fun statefulStandardNormal( + resource: Operand, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = java.statefulStandardNormal( + resource, + algorithm, + shape + ) + + /** + * Outputs random values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * @param data type for `output` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. 
+ * @param data type for `StatefulStandardNormalV2` output and operands + * @return a new instance of StatefulStandardNormal + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ + public fun statefulStandardNormal( + resource: Operand, + algorithm: Operand, + shape: Operand, + dtype: Class + ): StatefulStandardNormal = java.statefulStandardNormal( + resource, + algorithm, + shape, + dtype + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessMultinomial, with default output types + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). 
+ * @param outputDtype The value of the outputDtype attribute + * @param data type for `StatelessMultinomial` output and operands + * @return a new instance of StatelessMultinomial + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand, + outputDtype: Class + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed, + outputDtype + ) + + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessRandomNormal, with default output types + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ + public fun statelessRandomNormal(shape: Operand, seed: Operand): + StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed + ) + + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @param data type for `StatelessRandomNormal` output and operands + * @return a new instance of StatelessRandomNormal + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ + public fun statelessRandomNormal( + shape: Operand, + seed: Operand, + dtype: Class + ): StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed, + dtype + ) + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. 
+ * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessRandomUniform, with default output types + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ + public fun statelessRandomUniform(shape: Operand, seed: Operand): + StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed + ) + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @param data type for `StatelessRandomUniform` output and operands + * @return a new instance of StatelessRandomUniform + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ + public fun statelessRandomUniform( + shape: Operand, + seed: Operand, + dtype: Class + ): StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed, + dtype + ) + + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. 
+ * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessTruncatedNormal, with default output types + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ + public fun statelessTruncatedNormal(shape: Operand, seed: Operand): + StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed + ) + + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @param data type for `StatelessTruncatedNormal` output and operands + * @return a new instance of StatelessTruncatedNormal + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand, + dtype: Class + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed, + dtype + ) + + /** + * Outputs random values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attribute values + * @param data type for `TruncatedNormal` output and operands + * @return a new instance of TruncatedNormal + * @see org.tensorflow.op.RandomOps.truncatedNormal + * @param seed Sets the seed option. 
+ * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + public fun truncatedNormal( + shape: Operand, + dtype: Class, + seed: Long? = null, + seed2: Long? = null + ): TruncatedNormal = java.truncatedNormal( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } + ).toTypedArray() + ) + + /** + * Generates labels for candidate sampling with a uniform distribution. + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). 
+ * @param options carries optional attribute values + * @return a new instance of UniformCandidateSampler + * @see org.tensorflow.op.RandomOps.uniformCandidateSampler + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. + */ + public fun uniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? = null + ): UniformCandidateSampler = java.uniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param outputDtype The value of the outputDtype attribute + * @param options carries optional attribute values + * @param data type for `Multinomial` output and operands + * @return a new instance of Multinomial + * @see org.tensorflow.op.RandomOps.multinomial + * @param seed Sets the seed option. + * + * @param seed If either seed or seed2 is set to be non-zero, the internal random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. 
+ * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + @JvmName("multinomialReified") + public inline fun multinomial( + logits: Operand, + numSamples: Operand, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) + + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * This op uses two algorithms, depending on rate. If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. + * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param data type for `output` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. + * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. + * @param dtype The value of the dtype attribute + * @param options carries optional attribute values + * @param data type for `RandomPoissonV2` output and operands + * @return a new instance of RandomPoisson + * @see org.tensorflow.op.RandomOps.randomPoisson + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + @JvmName("randomPoissonReified") + public inline fun randomPoisson( + shape: Operand, + rate: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) + + /** + * Outputs random values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attribute values + * @param data type for `RandomStandardNormal` output and operands + * @return a new instance of RandomStandardNormal + * @see org.tensorflow.op.RandomOps.randomStandardNormal + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + @JvmName("randomStandardNormalReified") + public inline fun randomStandardNormal( + shape: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) + + /** + * Outputs random values from a uniform distribution. + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attribute values + * @param data type for `RandomUniform` output and operands + * @return a new instance of RandomUniform + * @see org.tensorflow.op.RandomOps.randomUniform + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. 
+ * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + @JvmName("randomUniformReified") + public inline fun randomUniform( + shape: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) + + /** + * The StatefulRandomBinomial operation + * + * @param data type for `output` output + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value + * @param dtype The value of the dtype attribute + * @param data type for `StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands + * @return a new instance of StatefulRandomBinomial + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ + @JvmName("statefulRandomBinomialReified") + public inline fun statefulRandomBinomialTyped( + resource: Operand, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, counts, + probs, V::class.java) + + /** + * Outputs random values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * @param data type for `output` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. 
+ * @param data type for `StatefulStandardNormalV2` output and operands + * @return a new instance of StatefulStandardNormal + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ + @JvmName("statefulStandardNormalReified") + public inline fun statefulStandardNormalTyped( + resource: Operand, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, + U::class.java) + + /** + * Draws samples from a multinomial distribution. + * + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). + * @param outputDtype The value of the outputDtype attribute + * @param data type for `StatelessMultinomial` output and operands + * @return a new instance of StatelessMultinomial + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ + @JvmName("statelessMultinomialReified") + public inline fun statelessMultinomialTyped( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, V::class.java) + + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. 
+ * @param data type for `StatelessRandomNormal` output and operands + * @return a new instance of StatelessRandomNormal + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ + @JvmName("statelessRandomNormalReified") + public inline fun statelessRandomNormalTyped(shape: Operand, + seed: Operand): StatelessRandomNormal = statelessRandomNormal(shape, + seed, V::class.java) + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @param data type for `StatelessRandomUniform` output and operands + * @return a new instance of StatelessRandomUniform + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ + @JvmName("statelessRandomUniformReified") + public inline fun statelessRandomUniformTyped(shape: Operand, + seed: Operand): StatelessRandomUniform = + statelessRandomUniform(shape, seed, V::class.java) + + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. 
+ * @param data type for `StatelessTruncatedNormal` output and operands + * @return a new instance of StatelessTruncatedNormal + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ + @JvmName("statelessTruncatedNormalReified") + public inline fun statelessTruncatedNormalTyped(shape: Operand, seed: Operand): StatelessTruncatedNormal = + statelessTruncatedNormal(shape, seed, V::class.java) + + /** + * Outputs random values from a truncated normal distribution. + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param data type for `output` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attribute values + * @param data type for `TruncatedNormal` output and operands + * @return a new instance of TruncatedNormal + * @see org.tensorflow.op.RandomOps.truncatedNormal + * @param seed Sets the seed option. + * + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + */ + @JvmName("truncatedNormalReified") + public inline fun truncatedNormal( + shape: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt new file mode 100644 index 00000000000..be6f97a6bc6 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -0,0 +1,738 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Int +import kotlin.Long +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.core.Shape +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class ShapeOps( + /** + * Get the parent [KotlinOps] object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.ShapeOps = ops.java.shape + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension. + * + * @param shape the TensorFlow shape + * @param lastDimension the dimension(s) to append + * @return a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension + * @see org.tensorflow.op.ShapeOps.append + */ + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + shape, + lastDimension + ) + + /** + * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension. + * + * @param shape the TensorFlow shape + * @param lastDimension the dimension(s) to append + * @return a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension + * @see org.tensorflow.op.ShapeOps.append + */ + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + shape, + lastDimension + ) + + /** + * Creates a 1-dimensional operand that represents a new shape containing the dimensions of the + * operand representing a shape, followed by the dimensions of an operand representing a shape + * to + * append. + * + * @param shape the TensorFlow shape + * @param shapeToAppend the other shape to append + * @return a 1-dimensional operand that represents a new shape containing the dimensions of the + * operand representing a shape, followed by the dimensions of an operand representing a + * shape + * to append + * @see org.tensorflow.op.ShapeOps.append + */ + public fun append(shape: Operand, shapeToAppend: Operand): Operand = + java.append( + shape, + shapeToAppend + ) + + /** + * Flatten the operand to 1 dimension. 
+ * + * @param the type of operand + * @param operand the operand to flatten + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ + public fun flatten(operand: Operand): Operand = java.flatten( + operand + ) + + /** + * Flatten the shape to 1 dimension. + * + * @param shape the TensorFlow shape + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ + public fun flatten(shape: Shape): Operand = java.flatten( + shape + ) + + /** + * Flatten the operand to 1 dimension + * + * @param the type of operand + * @param the shape datatype + * @param operand the operand to flatten + * @param type the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ + public fun flatten(operand: Operand, type: Class): Operand = + java.flatten( + operand, + type + ) + + /** + * Flatten the shape to 1 dimension. + * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param type the shape datatype + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ + public fun flatten(shape: Shape, type: Class): Operand = + java.flatten( + shape, + type + ) + + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param shape the TensorFlow shape + * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ + public fun head(shape: Shape): Operand = java.head( + shape + ) + + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ + public fun head(shape: Shape, type: Class): Operand = java.head( + shape, + type + ) + + /** + * Get the number of dimensions of the shape object. 
+ * + * @param shape the shape + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ + public fun numDimensions(shape: Shape): Operand = java.numDimensions( + shape + ) + + /** + * Get the number of dimensions of the shape object. + * + * @param the shape datatype + * @param shape the shape + * @param type the shape datatype + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ + public fun numDimensions(shape: Shape, type: Class): Operand = + java.numDimensions( + shape, + type + ) + + /** + * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape. + * + * @param shape the TensorFlow shape + * @param firstDimension the dimension to prepend + * @return a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + shape, + firstDimension + ) + + /** + * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape. + * + * @param shape the TensorFlow shape + * @param firstDimension the dimension to prepend + * @return a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + shape, + firstDimension + ) + + /** + * Creates a 1-dimensional operand that represents a new shape containing the dimensions of an + * operand representing the shape to prepend, followed by the dimensions of an operand + * representing a shape. 
+ * + * @param shape an operand containing the dimensions of a shape + * @param shapeToPrepend an operand containing the dimensions of the shape to prepend + * @return a 1-dimensional operand that represents a new shape containing the dimensions of an + * operand representing the shape to prepend, followed by the dimensions of an operand + * representing the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ + public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = + java.prepend( + shape, + shapeToPrepend + ) + + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param the type of Operand + * @param operand the operand + * @param axis the axis + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + public fun reduceDims(operand: Operand, axis: Operand): Operand = + java.reduceDims( + operand, + axis + ) + + /** + * Reduces the shape to the specified axis. + * + * @param shape the TensorFlow shape + * @param axis the axis + * @return an operand containing the dimensions for the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + public fun reduceDims(shape: Shape, axis: Operand): Operand = + java.reduceDims( + shape, + axis + ) + + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param the type of Operand + * @param the shape datatype + * @param operand the operand + * @param axis the axis + * @param type the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + public fun reduceDims( + operand: Operand, + axis: Operand, + type: Class + ): Operand = java.reduceDims( + operand, + axis, + type + ) + + /** + * Reduces the shape to the specified axis. 
+ * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param axis the axis + * @param type the shape datatype + * @return the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + public fun reduceDims( + shape: Shape, + axis: Operand, + type: Class + ): Operand = java.reduceDims( + shape, + axis, + type + ) + + /** + * Get the size represented by the TensorFlow shape. + * + * @param shape the TensorFlow shape + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size(shape: Shape): Operand = java.size( + shape + ) + + /** + * Get the size of the specified dimension for the shape of the tensor. + * + * @param input the operand + * @param dim the dimension + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size(input: Operand, dim: Operand): Operand = + java.size( + input, + dim + ) + + /** + * Get the size of the specified dimension in the shape. + * + * @param shape the TensorFlow shape + * @param dim the dimension + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size(shape: Shape, dim: Operand): Operand = java.size( + shape, + dim + ) + + /** + * Get the size represented by the TensorFlow shape. + * + * @param the type of the shape + * @param shape the TensorFlow shape + * @param type the shape datatype + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size(shape: Shape, type: Class): Operand = java.size( + shape, + type + ) + + /** + * Get the size of the specified dimension for the shape of the tensor. 
+ * + * @param the shape datatype + * @param input the operand + * @param dim the dimension + * @param type the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size( + input: Operand, + dim: Operand, + type: Class + ): Operand = java.size( + input, + dim, + type + ) + + /** + * Get the size of the specified dimension in the shape. + * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param dim the dimension + * @param type the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + public fun size( + shape: Shape, + dim: Operand, + type: Class + ): Operand = java.size( + shape, + dim, + type + ) + + /** + * Removes dimensions of size 1 from the shape. + * + * @param shape the TensorFlow shape + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ + public fun squeeze(shape: Shape): Operand = java.squeeze( + shape + ) + + /** + * Removes dimensions of size 1 from the shape. + * + * @param the shape datatype. + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ + public fun squeeze(shape: Shape, type: Class): Operand = + java.squeeze( + shape, + type + ) + + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape. + * + * @param shape the TensorFlow shape + * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ + public fun tail(shape: Shape): Operand = java.tail( + shape + ) + + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * + * the Shape. + * + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @param the shape datatype. 
+ * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ + public fun tail(shape: Shape, type: Class): Operand = java.tail( + shape, + type + ) + + /** + * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the + * shape. + * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ + public fun take(shape: Shape, n: Operand): Operand = java.take( + shape, + n + ) + + /** + * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of + * the + * shape. + * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ + public fun take( + shape: Shape, + n: Operand, + type: Class + ): Operand = java.take( + shape, + n, + type + ) + + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. + * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ + public fun takeLast(shape: Shape, n: Operand): Operand = + java.takeLast( + shape, + n + ) + + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. 
+ * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ + public fun takeLast( + shape: Shape, + n: Operand, + type: Class + ): Operand = java.takeLast( + shape, + n, + type + ) + + /** + * Flatten the operand to 1 dimension + * + * @param the type of operand + * @param the shape datatype + * @param operand the operand to flatten + * @param type the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ + @JvmName("flattenReified") + public inline fun flattenTyped(operand: Operand): Operand + = flatten(operand, U::class.java) + + /** + * Flatten the shape to 1 dimension. + * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param type the shape datatype + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ + @JvmName("flattenReified") + public inline fun flatten(shape: Shape): Operand = flatten(shape, + U::class.java) + + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ + @JvmName("headReified") + public inline fun head(shape: Shape): Operand = head(shape, + U::class.java) + + /** + * Get the number of dimensions of the shape object. 
+ * + * @param the shape datatype + * @param shape the shape + * @param type the shape datatype + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ + @JvmName("numDimensionsReified") + public inline fun numDimensions(shape: Shape): Operand = + numDimensions(shape, U::class.java) + + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param the type of Operand + * @param the shape datatype + * @param operand the operand + * @param axis the axis + * @param type the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + @JvmName("reduceDimsReified") + public inline fun reduceDims(operand: Operand, + axis: Operand): Operand = reduceDims(operand, axis, U::class.java) + + /** + * Reduces the shape to the specified axis. + * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param axis the axis + * @param type the shape datatype + * @return the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + @JvmName("reduceDimsReified") + public inline fun reduceDims(shape: Shape, axis: Operand): + Operand = reduceDims(shape, axis, U::class.java) + + /** + * Get the size represented by the TensorFlow shape. + * + * @param the type of the shape + * @param shape the TensorFlow shape + * @param type the shape datatype + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(shape: Shape): Operand = size(shape, + U::class.java) + + /** + * Get the size of the specified dimension for the shape of the tensor. 
+ * + * @param the shape datatype + * @param input the operand + * @param dim the dimension + * @param type the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(input: Operand, dim: Operand): + Operand = size(input, dim, U::class.java) + + /** + * Get the size of the specified dimension in the shape. + * + * @param the shape datatype + * @param shape the TensorFlow shape + * @param dim the dimension + * @param type the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(shape: Shape, dim: Operand): Operand = + size(shape, dim, U::class.java) + + /** + * Removes dimensions of size 1 from the shape. + * + * @param the shape datatype. + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ + @JvmName("squeezeReified") + public inline fun squeeze(shape: Shape): Operand = squeeze(shape, + U::class.java) + + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * + * the Shape. + * + * @param shape the TensorFlow shape + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ + @JvmName("tailReified") + public inline fun tail(shape: Shape): Operand = tail(shape, + U::class.java) + + /** + * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of + * the + * shape. + * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param type the shape datatype. + * @param the shape datatype. 
+ * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ + @JvmName("takeReified") + public inline fun take(shape: Shape, n: Operand): Operand = + take(shape, n, U::class.java) + + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. + * + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param type the shape datatype. + * @param the shape datatype. + * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ + @JvmName("takeLastReified") + public inline fun takeLast(shape: Shape, n: Operand): Operand = + takeLast(shape, n, U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt new file mode 100644 index 00000000000..b43902e3dec --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -0,0 +1,667 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.signal.BatchFft +import org.tensorflow.op.signal.BatchFft2d +import org.tensorflow.op.signal.BatchFft3d +import org.tensorflow.op.signal.BatchIfft +import org.tensorflow.op.signal.BatchIfft2d +import org.tensorflow.op.signal.BatchIfft3d +import org.tensorflow.op.signal.Fft +import org.tensorflow.op.signal.Fft2d +import org.tensorflow.op.signal.Fft3d +import org.tensorflow.op.signal.Ifft +import org.tensorflow.op.signal.Ifft2d +import org.tensorflow.op.signal.Ifft3d +import org.tensorflow.op.signal.Irfft +import org.tensorflow.op.signal.Irfft2d +import org.tensorflow.op.signal.Irfft3d +import org.tensorflow.op.signal.Rfft +import org.tensorflow.op.signal.Rfft2d +import org.tensorflow.op.signal.Rfft3d +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class SignalOps( + /** + * Get the parent [KotlinOps] object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SignalOps = ops.java.signal + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * The BatchFFT operation + * + * @param input The input value + * @return a new instance of BatchFft + * @see org.tensorflow.op.SignalOps.batchFft + */ + public fun batchFft(input: Operand): BatchFft = java.batchFft( + input + ) + + /** + * The BatchFFT2D operation + * + * @param input The input value + * @return a new instance of BatchFft2d + * @see org.tensorflow.op.SignalOps.batchFft2d + */ + public fun batchFft2d(input: Operand): BatchFft2d = java.batchFft2d( + input + ) + + /** + * The BatchFFT3D operation + * + * @param input The input value + * @return a new instance of BatchFft3d + * @see org.tensorflow.op.SignalOps.batchFft3d + */ + public fun batchFft3d(input: Operand): BatchFft3d = java.batchFft3d( + input + ) + + /** + * The BatchIFFT operation + * + * @param input The input value + * @return a new instance of BatchIfft + * @see org.tensorflow.op.SignalOps.batchIfft + */ + public fun batchIfft(input: Operand): BatchIfft = java.batchIfft( + input + ) + + /** + * The BatchIFFT2D operation + * + * @param input The input value + * @return a new instance of BatchIfft2d + * @see org.tensorflow.op.SignalOps.batchIfft2d + */ + public fun batchIfft2d(input: Operand): BatchIfft2d = java.batchIfft2d( + input + ) + + /** + * The BatchIFFT3D operation + * + * @param input The input value + * @return a new instance of BatchIfft3d + * @see org.tensorflow.op.SignalOps.batchIfft3d + */ + public fun batchIfft3d(input: Operand): BatchIfft3d = java.batchIfft3d( + input + ) + + /** + * Fast Fourier transform. + * Computes the 1-dimensional discrete Fourier transform over the inner-most + * dimension of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. 
+ * @param data type for `FFT` output and operands + * @return a new instance of Fft + * @see org.tensorflow.op.SignalOps.fft + */ + public fun fft(input: Operand): Fft = java.fft( + input + ) + + /** + * 2D fast Fourier transform. + * Computes the 2-dimensional discrete Fourier transform over the inner-most + * 2 dimensions of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param data type for `FFT2D` output and operands + * @return a new instance of Fft2d + * @see org.tensorflow.op.SignalOps.fft2d + */ + public fun fft2d(input: Operand): Fft2d = java.fft2d( + input + ) + + /** + * 3D fast Fourier transform. + * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 + * dimensions of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param data type for `FFT3D` output and operands + * @return a new instance of Fft3d + * @see org.tensorflow.op.SignalOps.fft3d + */ + public fun fft3d(input: Operand): Fft3d = java.fft3d( + input + ) + + /** + * Inverse fast Fourier transform. + * Computes the inverse 1-dimensional discrete Fourier transform over the + * inner-most dimension of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param data type for `IFFT` output and operands + * @return a new instance of Ifft + * @see org.tensorflow.op.SignalOps.ifft + */ + public fun ifft(input: Operand): Ifft = java.ifft( + input + ) + + /** + * Inverse 2D fast Fourier transform. + * Computes the inverse 2-dimensional discrete Fourier transform over the + * inner-most 2 dimensions of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param data type for `IFFT2D` output and operands + * @return a new instance of Ifft2d + * @see org.tensorflow.op.SignalOps.ifft2d + */ + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + input + ) + + /** + * Inverse 3D fast Fourier transform. 
+ * Computes the inverse 3-dimensional discrete Fourier transform over the + * inner-most 3 dimensions of `input`. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param data type for `IFFT3D` output and operands + * @return a new instance of Ifft3d + * @see org.tensorflow.op.SignalOps.ifft3d + */ + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + input + ) + + /** + * Inverse real-valued fast Fourier transform. + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @return a new instance of Irfft, with default output types + * @see org.tensorflow.op.SignalOps.irfft + */ + public fun irfft(input: Operand, fftLength: Operand): Irfft = + java.irfft( + input, + fftLength + ) + + /** + * Inverse real-valued fast Fourier transform. + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. 
If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT` output and operands + * @return a new instance of Irfft + * @see org.tensorflow.op.SignalOps.irfft + */ + public fun irfft( + input: Operand, + fftLength: Operand, + Treal: Class + ): Irfft = java.irfft( + input, + fftLength, + Treal + ) + + /** + * Inverse 2D real-valued fast Fourier transform. + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. 
The FFT length for each dimension. + * @return a new instance of Irfft2d, with default output types + * @see org.tensorflow.op.SignalOps.irfft2d + */ + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = + java.irfft2d( + input, + fftLength + ) + + /** + * Inverse 2D real-valued fast Fourier transform. + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT2D` output and operands + * @return a new instance of Irfft2d + * @see org.tensorflow.op.SignalOps.irfft2d + */ + public fun irfft2d( + input: Operand, + fftLength: Operand, + Treal: Class + ): Irfft2d = java.irfft2d( + input, + fftLength, + Treal + ) + + /** + * Inverse 3D real-valued fast Fourier transform. + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. 
+ * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @return a new instance of Irfft3d, with default output types + * @see org.tensorflow.op.SignalOps.irfft3d + */ + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = + java.irfft3d( + input, + fftLength + ) + + /** + * Inverse 3D real-valued fast Fourier transform. + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. 
If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT3D` output and operands + * @return a new instance of Irfft3d + * @see org.tensorflow.op.SignalOps.irfft3d + */ + public fun irfft3d( + input: Operand, + fftLength: Operand, + Treal: Class + ): Irfft3d = java.irfft3d( + input, + fftLength, + Treal + ) + + /** + * Real-valued fast Fourier transform. + * Computes the 1-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. + * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT` output and operands + * @return a new instance of Rfft + * @see org.tensorflow.op.SignalOps.rfft + */ + public fun rfft( + input: Operand, + fftLength: Operand, + Tcomplex: Class + ): Rfft = java.rfft( + input, + fftLength, + Tcomplex + ) + + /** + * 2D real-valued fast Fourier transform. + * Computes the 2-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 2 dimensions of `input`. 
+ * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT2D` output and operands + * @return a new instance of Rfft2d + * @see org.tensorflow.op.SignalOps.rfft2d + */ + public fun rfft2d( + input: Operand, + fftLength: Operand, + Tcomplex: Class + ): Rfft2d = java.rfft2d( + input, + fftLength, + Tcomplex + ) + + /** + * 3D real-valued fast Fourier transform. + * Computes the 3-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 3 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
+ * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT3D` output and operands + * @return a new instance of Rfft3d + * @see org.tensorflow.op.SignalOps.rfft3d + */ + public fun rfft3d( + input: Operand, + fftLength: Operand, + Tcomplex: Class + ): Rfft3d = java.rfft3d( + input, + fftLength, + Tcomplex + ) + + /** + * Inverse real-valued fast Fourier transform. + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT` output and operands + * @return a new instance of Irfft + * @see org.tensorflow.op.SignalOps.irfft + */ + @JvmName("irfftReified") + public inline fun irfftTyped(input: Operand, + fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) + + /** + * Inverse 2D real-valued fast Fourier transform. + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. 
+ * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT2D` output and operands + * @return a new instance of Irfft2d + * @see org.tensorflow.op.SignalOps.irfft2d + */ + @JvmName("irfft2dReified") + public inline fun irfft2dTyped(input: Operand, + fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) + + /** + * Inverse 3D real-valued fast Fourier transform. + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. 
+ * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param Treal The value of the Treal attribute + * @param data type for `IRFFT3D` output and operands + * @return a new instance of Irfft3d + * @see org.tensorflow.op.SignalOps.irfft3d + */ + @JvmName("irfft3dReified") + public inline fun irfft3dTyped(input: Operand, + fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) + + /** + * Real-valued fast Fourier transform. + * Computes the 1-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. + * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT` output and operands + * @return a new instance of Rfft + * @see org.tensorflow.op.SignalOps.rfft + */ + @JvmName("rfftReified") + public inline fun rfft(input: Operand, + fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) + + /** + * 2D real-valued fast Fourier transform. 
+ * Computes the 2-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 2 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT2D` output and operands + * @return a new instance of Rfft2d + * @see org.tensorflow.op.SignalOps.rfft2d + */ + @JvmName("rfft2dReified") + public inline fun rfft2d(input: Operand, + fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) + + /** + * 3D real-valued fast Fourier transform. + * Computes the 3-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 3 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param data type for `output` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
+ * @param Tcomplex The value of the Tcomplex attribute + * @param data type for `RFFT3D` output and operands + * @return a new instance of Rfft3d + * @see org.tensorflow.op.SignalOps.rfft3d + */ + @JvmName("rfft3dReified") + public inline fun rfft3d(input: Operand, + fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt new file mode 100644 index 00000000000..9a2abc83428 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -0,0 +1,2311 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.sparse.AddManySparseToTensorsMap +import org.tensorflow.op.sparse.AddSparseToTensorsMap +import org.tensorflow.op.sparse.DenseToDenseSetOperation +import org.tensorflow.op.sparse.DenseToSparseSetOperation +import org.tensorflow.op.sparse.DeserializeSparse +import org.tensorflow.op.sparse.SparseAccumulatorApplyGradient +import org.tensorflow.op.sparse.SparseAccumulatorTakeGradient +import org.tensorflow.op.sparse.SparseAdd +import org.tensorflow.op.sparse.SparseAddGrad +import org.tensorflow.op.sparse.SparseBincount +import org.tensorflow.op.sparse.SparseConcat +import org.tensorflow.op.sparse.SparseConditionalAccumulator +import org.tensorflow.op.sparse.SparseCross +import org.tensorflow.op.sparse.SparseCrossHashed +import org.tensorflow.op.sparse.SparseDenseCwiseAdd +import org.tensorflow.op.sparse.SparseDenseCwiseDiv +import org.tensorflow.op.sparse.SparseDenseCwiseMul +import org.tensorflow.op.sparse.SparseFillEmptyRows +import org.tensorflow.op.sparse.SparseFillEmptyRowsGrad +import org.tensorflow.op.sparse.SparseMatMul +import org.tensorflow.op.sparse.SparseReduceMax +import org.tensorflow.op.sparse.SparseReduceMaxSparse +import org.tensorflow.op.sparse.SparseReduceSum +import org.tensorflow.op.sparse.SparseReduceSumSparse +import org.tensorflow.op.sparse.SparseReorder +import org.tensorflow.op.sparse.SparseReshape +import org.tensorflow.op.sparse.SparseSegmentMean +import org.tensorflow.op.sparse.SparseSegmentMeanGrad +import org.tensorflow.op.sparse.SparseSegmentMeanWithNumSegments +import org.tensorflow.op.sparse.SparseSegmentSqrtN +import org.tensorflow.op.sparse.SparseSegmentSqrtNGrad +import org.tensorflow.op.sparse.SparseSegmentSqrtNWithNumSegments +import org.tensorflow.op.sparse.SparseSegmentSum 
+import org.tensorflow.op.sparse.SparseSegmentSumGrad +import org.tensorflow.op.sparse.SparseSegmentSumWithNumSegments +import org.tensorflow.op.sparse.SparseSlice +import org.tensorflow.op.sparse.SparseSliceGrad +import org.tensorflow.op.sparse.SparseSoftmax +import org.tensorflow.op.sparse.SparseSparseMaximum +import org.tensorflow.op.sparse.SparseSparseMinimum +import org.tensorflow.op.sparse.SparseSplit +import org.tensorflow.op.sparse.SparseTensorDenseAdd +import org.tensorflow.op.sparse.SparseTensorDenseMatMul +import org.tensorflow.op.sparse.SparseToDense +import org.tensorflow.op.sparse.SparseToSparseSetOperation +import org.tensorflow.op.sparse.TakeManySparseFromTensorsMap +import org.tensorflow.types.TBool +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class SparseOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SparseOps = ops.java.sparse + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. + * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`, where + * + * `sparse_indices.shape[1] == sparse_shape.shape[0] == R` + * + * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` + * having a first `sparse_indices` column taking values between `[0, N)`, where + * the minibatch size `N == sparse_shape[0]`. + * + * The input `SparseTensor` must have rank `R` greater than 1, and the first + * dimension is treated as the minibatch dimension. 
Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The stored + * `SparseTensor` objects pointed to by each row of the output `sparse_handles` + * will have rank `R-1`. + * + * The `SparseTensor` values can then be read out as part of a minibatch by passing + * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the _name_ of the Operation created by calling + * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * The minibatch size `N == sparse_shape[0]`. + * @param options carries optional attribute values + * @return a new instance of AddManySparseToTensorsMap + * @see org.tensorflow.op.SparseOps.addManySparseToTensorsMap + * @param container Sets the container option. + * + * @param container The container name for the `SparseTensorsMap` created by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the `SparseTensorsMap` created by this op. + * If blank, the new Operation's unique name is used. + * @return this Options instance. + */ + public fun addManySparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? 
= null + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + /** + * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. + * A `SparseTensor` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`. + * + * This operator takes the given `SparseTensor` and adds it to a container + * object (a `SparseTensorsMap`). A unique key within this container is generated + * in the form of an `int64`, and this is the value that is returned. + * + * The `SparseTensor` can then be read out as part of a minibatch by passing + * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the _name_ of the Operation created by calling + * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param options carries optional attribute values + * @return a new instance of AddSparseToTensorsMap + * @see org.tensorflow.op.SparseOps.addSparseToTensorsMap + * @param container Sets the container option. + * + * @param container The container name for the `SparseTensorsMap` created by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the `SparseTensorsMap` created by this op. 
+ * If blank, the new Operation's unique name is used. + * @return this Options instance. + */ + public fun addSparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? = null + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + /** + * Applies set operation along last dimension of 2 `Tensor` inputs. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param setOperation The value of the setOperation attribute + * @param options carries optional attribute values + * @param data type for `DenseToDenseSetOperation` output and operands + * @return a new instance of DenseToDenseSetOperation + * @see org.tensorflow.op.SparseOps.denseToDenseSetOperation + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. 
+ */ + public fun denseToDenseSetOperation( + set1: Operand, + set2: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + set1, + set2, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + /** + * Applies set operation along last dimension of `Tensor` and `SparseTensor`. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set2` + * indices. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the + * max set size across `n-1` dimensions. 
+ * @param setOperation The value of the setOperation attribute + * @param options carries optional attribute values + * @param data type for `DenseToSparseSetOperation` output and operands + * @return a new instance of DenseToSparseSetOperation + * @see org.tensorflow.op.SparseOps.denseToSparseSetOperation + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. + */ + public fun denseToSparseSetOperation( + set1: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + set1, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + /** + * Deserialize `SparseTensor` objects. + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * the sparse tensors have been concatenated along new dimensions, one for each + * batch. + * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. 
+ * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension + * must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeSparse` output and operands + * @return a new instance of DeserializeSparse + * @see org.tensorflow.op.SparseOps.deserializeSparse + */ + public fun deserializeSparse(serializedSparse: Operand, dtype: Class): + DeserializeSparse = java.deserializeSparse( + serializedSparse, + dtype + ) + + /** + * Applies a sparse gradient to a given accumulator. + * Does not add if local_step is smaller than the accumulator's + * global_step. + * + * @param handle The handle to a accumulator. + * @param localStep The local_step value at which the sparse gradient was computed. + * @param gradientIndices Indices of the sparse gradient to be accumulated. Must be a + * vector. + * @param gradientValues Values are the non-zero slices of the gradient, and must have + * the same first dimension as indices, i.e., the nnz represented by indices and + * values must be consistent. + * @param gradientShape Shape of the sparse gradient to be accumulated. + * @param hasKnownShape Boolean indicating whether gradient_shape is unknown, in which + * case the input is ignored during validation. 
+ * @return a new instance of SparseAccumulatorApplyGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorApplyGradient + */ + public fun sparseAccumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradientIndices: Operand, + gradientValues: Operand, + gradientShape: Operand, + hasKnownShape: Boolean + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + handle, + localStep, + gradientIndices, + gradientValues, + gradientShape, + hasKnownShape + ) + + /** + * Extracts the average sparse gradient in a SparseConditionalAccumulator. + * The op will blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it will return its + * average of the accumulated gradients. Also automatically increments + * the recorded global_step in the accumulator by 1, and resets the + * aggregate to 0. + * + * @param data type for `values` output + * @param handle The handle to a SparseConditionalAccumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @param data type for `SparseAccumulatorTakeGradient` output and operands + * @return a new instance of SparseAccumulatorTakeGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient + */ + public fun sparseAccumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: Class + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + /** + * Adds two `SparseTensor` objects to produce another `SparseTensor`. + * The input `SparseTensor` objects' indices are assumed ordered in standard + * lexicographic order. If this is not the case, before this step run + * `SparseReorder` to restore index ordering. 
+ * + * By default, if two values sum to zero at some index, the output `SparseTensor` + * would still include that particular location in its index, storing a zero in the + * corresponding value slot. To override this, callers can specify `thresh`, + * indicating that if the sum has a magnitude strictly smaller than `thresh`, its + * corresponding value and index would then not be included. In particular, + * `thresh == 0` (default) means everything is kept and actual thresholding happens + * only for a positive value. + * + * In the following shapes, `nnz` is the count after taking `thresh` into account. + * + * @param data type for `sum_values` output + * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` + * Matrix. + * @param aValues 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. + * @param bIndices 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` + * Matrix. + * @param bValues 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. + * @param bShape 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. + * @param thresh 0-D. The magnitude threshold that determines if an output value/index + * pair takes space. + * @param data type for `SparseAdd` output and operands + * @return a new instance of SparseAdd + * @see org.tensorflow.op.SparseOps.sparseAdd + */ + public fun sparseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand, + thresh: Operand + ): SparseAdd = java.sparseAdd( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape, + thresh + ) + + /** + * The gradient operator for the SparseAdd op. + * The SparseAdd op calculates A + B, where A, B, and the sum are all represented + * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. 
+ * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty + * values of A and B. + * + * @param data type for `a_val_grad` output + * @param backpropValGrad 1-D with shape `[nnz(sum)]`. The gradient with respect to + * the non-empty values of the sum. + * @param aIndices 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. + * @param bIndices 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. + * @param sumIndices 2-D. The `indices` of the sum `SparseTensor`, size + * `[nnz(sum), ndims]`. + * @param data type for `SparseAddGrad` output and operands + * @return a new instance of SparseAddGrad + * @see org.tensorflow.op.SparseOps.sparseAddGrad + */ + public fun sparseAddGrad( + backpropValGrad: Operand, + aIndices: Operand, + bIndices: Operand, + sumIndices: Operand + ): SparseAddGrad = java.sparseAddGrad( + backpropValGrad, + aIndices, + bIndices, + sumIndices + ) + + /** + * Counts the number of occurrences of each value in an integer array. + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `output` output + * @param indices 2D int64 `Tensor`. + * @param values 1D int `Tensor`. + * @param denseShape 1D int64 `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. 
+ * @param options carries optional attribute values + * @param data type for `SparseBincount` output and operands + * @param data type for `SparseBincount` output and operands + * @return a new instance of SparseBincount + * @see org.tensorflow.op.SparseOps.sparseBincount + * @param binaryOutput Sets the binaryOutput option. + * + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + * @return this Options instance. + */ + public fun sparseBincount( + indices: Operand, + values: Operand, + denseShape: Operand, + sizeOutput: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): SparseBincount = java.sparseBincount( + indices, + values, + denseShape, + sizeOutput, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + /** + * Concatenates a list of `SparseTensor` along the specified dimension. + * Concatenation is with respect to the dense versions of these sparse tensors. + * It is assumed that each input is a `SparseTensor` whose elements are ordered + * along increasing dimension number. + * + * All inputs' shapes must match, except for the concat dimension. The + * `indices`, `values`, and `shapes` lists must have the same length. + * + * The output shape is identical to the inputs', except along the concat + * dimension, where it is the sum of the inputs' sizes along that dimension. + * + * The output elements will be resorted to preserve the sort order along + * increasing dimension number. + * + * This op runs in `O(M log M)` time, where `M` is the total number of non-empty + * values across all inputs. This is due to the need for an internal sort in + * order to concatenate efficiently across an arbitrary dimension. 
+ * + * For example, if `concat_dim = 1` and the inputs are + * ``` + * sp_inputs[0]: shape = [2, 3] + * [0, 2]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * sp_inputs[1]: shape = [2, 4] + * [0, 1]: "d" + * [0, 2]: "e" + * + * ``` + * + * then the output will be + * ``` + * shape = [2, 7] + * [0, 2]: "a" + * [0, 4]: "d" + * [0, 5]: "e" + * [1, 0]: "b" + * [1, 1]: "c" + * + * ``` + * + * Graphically this is equivalent to doing + * ``` + * [ a] concat [ d e ] = [ a d e ] + * [b c ] [ ] [b c ] + * + * ``` + * + * @param data type for `output_values` output + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. Non-empty values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param concatDim Dimension to concatenate along. Must be in range [-rank, rank), + * where rank is the number of dimensions in each input `SparseTensor`. + * @param data type for `SparseConcat` output and operands + * @return a new instance of SparseConcat + * @see org.tensorflow.op.SparseOps.sparseConcat + */ + public fun sparseConcat( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + concatDim: Long + ): SparseConcat = java.sparseConcat( + indices, + values, + shapes, + concatDim + ) + + /** + * A conditional accumulator for aggregating sparse gradients. + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values. 
+ * @param options carries optional attribute values + * @param data type for `SparseConditionalAccumulator` output and operands + * @return a new instance of SparseConditionalAccumulator + * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container Sets the container option. + * + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this accumulator will be shared under the given name + * across multiple sessions. + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. + */ + public fun sparseConditionalAccumulator( + dtype: Class, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? = null + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + /** + * Generates sparse cross from a list of sparse and dense tensors. + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with + * the batchwise crosses of these features. 
+ * + * For example, if the inputs are + * ``` + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" + * + * inputs[2]: Tensor [["f"], ["g"]] + * + * ``` + * + * then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" + * + * ``` + * + * if hashed_output=true then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * ``` + * + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * @param sep string used when joining a list of string inputs, can be used as separator later. + * @return a new instance of SparseCross + * @see org.tensorflow.op.SparseOps.sparseCross + */ + public fun sparseCross( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + sep: Operand + ): SparseCross = java.sparseCross( + indices, + values, + shapes, + denseInputs, + sep + ) + + /** + * Generates sparse cross from a list of sparse and dense tensors. + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with + * the batchwise crosses of these features. 
+ * + * For example, if the inputs are + * ``` + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" + * + * inputs[2]: Tensor [["f"], ["g"]] + * + * ``` + * + * then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" + * + * ``` + * + * if hashed_output=true then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * ``` + * + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * @param numBuckets It is used if hashed_output is true. + * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. + * @param strongHash boolean, if true, siphash with salt will be used instead of farmhash. + * @param salt Specify the salt that will be used by the siphash function. 
+ * @return a new instance of SparseCrossHashed + * @see org.tensorflow.op.SparseOps.sparseCrossHashed + */ + public fun sparseCrossHashed( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + numBuckets: Operand, + strongHash: Operand, + salt: Operand + ): SparseCrossHashed = java.sparseCrossHashed( + indices, + values, + shapes, + denseInputs, + numBuckets, + strongHash, + salt + ) + + /** + * Adds up a SparseTensor and a dense Tensor, using these special rules: + * (1) Broadcasts the dense side to have the same shape as the sparse side, if + * eligible; + * (2) Then, only the dense values pointed to by the indices of the SparseTensor + * participate in the cwise addition. + * + * By these rules, the result is a logical SparseTensor with exactly the same + * indices and shape, but possibly with different non-zero values. The output of + * this Op is the resultant non-zero values. + * + * @param data type for `output` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseAdd` output and operands + * @return a new instance of SparseDenseCwiseAdd + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseAdd + */ + public fun sparseDenseCwiseAdd( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + spIndices, + spValues, + spShape, + dense + ) + + /** + * Component-wise divides a SparseTensor by a dense Tensor. + * _Limitation_: this Op only broadcasts the dense side to the sparse side, but not + * the other direction. + * + * @param data type for `output` output + * @param spIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseDiv` output and operands + * @return a new instance of SparseDenseCwiseDiv + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseDiv + */ + public fun sparseDenseCwiseDiv( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + spIndices, + spValues, + spShape, + dense + ) + + /** + * Component-wise multiplies a SparseTensor by a dense Tensor. + * The output locations corresponding to the implicitly zero elements in the sparse + * tensor will be zero (i.e., will not take up storage space), regardless of the + * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). + * + * _Limitation_: this Op only broadcasts the dense side to the sparse side, but not + * the other direction. + * + * @param data type for `output` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseMul` output and operands + * @return a new instance of SparseDenseCwiseMul + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseMul + */ + public fun sparseDenseCwiseMul( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + spIndices, + spValues, + spShape, + dense + ) + + /** + * Fills empty rows in the input 2-D `SparseTensor` with a default value. 
+ * The input `SparseTensor` is represented via the tuple of inputs + * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the + * same `dense_shape` but with indices `output_indices` and values + * `output_values`. + * + * This op inserts a single entry for every row that doesn't have any values. + * The index is created as `[row, 0, ..., 0]` and the inserted value + * is `default_value`. + * + * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: + * ``` + * [0, 1]: a + * [0, 3]: b + * [2, 0]: c + * [3, 1]: d + * + * ``` + * + * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: + * ``` + * [0, 1]: a + * [0, 3]: b + * [1, 0]: default_value + * [2, 0]: c + * [3, 1]: d + * [4, 0]: default_value + * + * ``` + * + * The output `SparseTensor` will be in row-major order and will have the + * same shape as the input. + * + * This op also returns an indicator vector shaped `[dense_shape[0]]` such that + * ``` + * empty_row_indicator[i] = True iff row i was an empty row. + * + * ``` + * + * And a reverse index map vector shaped `[indices.shape[0]]` that is used during + * backpropagation, + * ``` + * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] + * + * ``` + * + * @param data type for `output_values` output + * @param indices 2-D. the indices of the sparse tensor. + * @param values 1-D. the values of the sparse tensor. + * @param denseShape 1-D. the shape of the sparse tensor. + * @param defaultValue 0-D. default value to insert into location `[row, 0, ..., 0]` + * for rows missing from the input sparse tensor. + * output indices: 2-D. the indices of the filled sparse tensor. 
+ * @param data type for `SparseFillEmptyRows` output and operands + * @return a new instance of SparseFillEmptyRows + * @see org.tensorflow.op.SparseOps.sparseFillEmptyRows + */ + public fun sparseFillEmptyRows( + indices: Operand, + values: Operand, + denseShape: Operand, + defaultValue: Operand + ): SparseFillEmptyRows = java.sparseFillEmptyRows( + indices, + values, + denseShape, + defaultValue + ) + + /** + * The gradient of SparseFillEmptyRows. + * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, + * shaped `[N_full]`, where `N_full >= N` and copies data into either + * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and + * `d_default_value` is a scalar. + * + * d_values[j] = grad_values[reverse_index_map[j]] + * d_default_value = sum_{k : 0 .. N_full - 1} ( + * grad_values[k] * 1{k not in reverse_index_map}) + * + * @param data type for `d_values` output + * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. + * @param gradValues 1-D. The gradients from backprop. + * @param data type for `SparseFillEmptyRowsGrad` output and operands + * @return a new instance of SparseFillEmptyRowsGrad + * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad + */ + public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, + gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + reverseIndexMap, + gradValues + ) + + /** + * Multiply matrix "a" by matrix "b". + * The inputs must be two-dimensional matrices and the inner dimension of "a" must + * match the outer dimension of "b". Both "a" and "b" must be + * `Tensor`s not + * `SparseTensor`s. This op is optimized for the case where at least one of "a" or + * "b" is sparse, in the sense that they have a large proportion of zero values. + * The breakeven for using this versus a dense matrix multiply on one platform was + * 30% zero values in the sparse matrix. 
+ * + * The gradient computation of this operation will only take advantage of sparsity + * in the input gradient when that gradient comes from a Relu. + * + * @param a The a value + * @param b The b value + * @param options carries optional attribute values + * @return a new instance of SparseMatMul + * @see org.tensorflow.op.SparseOps.sparseMatMul + * @param transposeA Sets the transposeA option. + * + * @param transposeA the transposeA option + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB the transposeB option + * @return this Options instance. + * @param aIsSparse Sets the aIsSparse option. + * + * @param aIsSparse the aIsSparse option + * @return this Options instance. + * @param bIsSparse Sets the bIsSparse option. + * + * @param bIsSparse the bIsSparse option + * @return this Options instance. + */ + public fun sparseMatMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null, + aIsSparse: Boolean? = null, + bIsSparse: Boolean? = null + ): SparseMatMul = java.sparseMatMul( + a, + b, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + ).toTypedArray() + ) + + /** + * Computes the max of elements across dimensions of a SparseTensor. + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` + * instead of a sparse one. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. 
+ * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param data type for `output` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param data type for `SparseReduceMax` output and operands + * @return a new instance of SparseReduceMax + * @see org.tensorflow.op.SparseOps.sparseReduceMax + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun sparseReduceMax( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMax = java.sparseReduceMax( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the max of elements across dimensions of a SparseTensor. + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a + * SparseTensor. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. 
+ * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param data type for `output_values` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param data type for `SparseReduceMaxSparse` output and operands + * @return a new instance of SparseReduceMaxSparse + * @see org.tensorflow.op.SparseOps.sparseReduceMaxSparse + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun sparseReduceMaxSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the sum of elements across dimensions of a SparseTensor. + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` + * instead of a sparse one. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. 
+ * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param data type for `output` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param data type for `SparseReduceSum` output and operands + * @return a new instance of SparseReduceSum + * @see org.tensorflow.op.SparseOps.sparseReduceSum + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun sparseReduceSum( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceSum = java.sparseReduceSum( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + ).toTypedArray() + ) + + /** + * Computes the sum of elements across dimensions of a SparseTensor. + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a + * SparseTensor. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. 
+ * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param data type for `output_values` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param data type for `SparseReduceSumSparse` output and operands + * @return a new instance of SparseReduceSumSparse + * @see org.tensorflow.op.SparseOps.sparseReduceSumSparse + * @param keepDims Sets the keepDims option. + * + * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. + */ + public fun sparseReduceSumSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceSumSparse = java.sparseReduceSumSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + ).toTypedArray() + ) + + /** + * Reorders a SparseTensor into the canonical, row-major ordering. + * Note that by convention, all sparse ops preserve the canonical ordering along + * increasing dimension number. The only time ordering can be violated is during + * manual manipulation of the indices and values vectors to add entries. + * + * Reordering does not affect the shape of the SparseTensor. + * + * If the tensor has rank `R` and `N` non-empty values, `input_indices` has + * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. 
+ * + * @param data type for `output_values` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param data type for `SparseReorder` output and operands + * @return a new instance of SparseReorder + * @see org.tensorflow.op.SparseOps.sparseReorder + */ + public fun sparseReorder( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand + ): SparseReorder = java.sparseReorder( + inputIndices, + inputValues, + inputShape + ) + + /** + * Reshapes a SparseTensor to represent values in a new dense shape. + * This operation has the same semantics as reshape on the represented dense + * tensor. The `input_indices` are recomputed based on the requested `new_shape`. + * + * If one component of `new_shape` is the special value -1, the size of that + * dimension is computed so that the total dense size remains constant. At + * most one component of `new_shape` can be -1. The number of dense elements + * implied by `new_shape` must be the same as the number of dense elements + * originally implied by `input_shape`. + * + * Reshaping does not affect the order of values in the SparseTensor. + * + * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` + * has length `R_out`, then `input_indices` has shape `[N, R_in]`, + * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and + * `output_shape` has length `R_out`. + * + * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a + * SparseTensor. + * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. + * @param newShape 1-D. `R_out` vector with the requested new dense shape. 
+ * @return a new instance of SparseReshape + * @see org.tensorflow.op.SparseOps.sparseReshape + */ + public fun sparseReshape( + inputIndices: Operand, + inputShape: Operand, + newShape: Operand + ): SparseReshape = java.sparseReshape( + inputIndices, + inputShape, + newShape + ) + + /** + * Computes the mean along sparse segments of a tensor. + * See `tf.sparse.segment_sum` for usage examples. + * + * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param data type for `SparseSegmentMean` output and operands + * @return a new instance of SparseSegmentMean + * @see org.tensorflow.op.SparseOps.sparseSegmentMean + */ + public fun sparseSegmentMean( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentMean = java.sparseSegmentMean( + data, + indices, + segmentIds + ) + + /** + * Computes gradients for SparseSegmentMean. + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param data type for `output` output + * @param grad gradient propagated to the SparseSegmentMean op. + * @param indices indices passed to the corresponding SparseSegmentMean op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentMean op. 
+ * @param data type for `SparseSegmentMeanGrad` output and operands + * @return a new instance of SparseSegmentMeanGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentMeanGrad + */ + public fun sparseSegmentMeanGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + /** + * Computes the mean along sparse segments of a tensor. + * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param numSegments Should equal the number of distinct segment IDs. + * @param data type for `SparseSegmentMeanWithNumSegments` output and operands + * @return a new instance of SparseSegmentMeanWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments + */ + public fun sparseSegmentMeanWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + /** + * Computes the sum along sparse segments of a tensor divided by the sqrt of N. + * N is the size of the segment being reduced. + * + * See `tf.sparse.segment_sum` for usage examples. + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
+ * @param data type for `SparseSegmentSqrtN` output and operands + * @return a new instance of SparseSegmentSqrtN + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtN + */ + public fun sparseSegmentSqrtN( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + data, + indices, + segmentIds + ) + + /** + * Computes gradients for SparseSegmentSqrtN. + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param data type for `output` output + * @param grad gradient propagated to the SparseSegmentSqrtN op. + * @param indices indices passed to the corresponding SparseSegmentSqrtN op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSqrtN op. + * @param data type for `SparseSegmentSqrtNGrad` output and operands + * @return a new instance of SparseSegmentSqrtNGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNGrad + */ + public fun sparseSegmentSqrtNGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + /** + * Computes the sum along sparse segments of a tensor divided by the sqrt of N. + * N is the size of the segment being reduced. + * + * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
+ * @param numSegments Should equal the number of distinct segment IDs. + * @param data type for `SparseSegmentSqrtNWithNumSegments` output and operands + * @return a new instance of SparseSegmentSqrtNWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments + */ + public fun sparseSegmentSqrtNWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + /** + * Computes the sum along sparse segments of a tensor. + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * # Select two rows, one segment. + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + * # => [[0 0 0 0]] + * + * # Select two rows, two segment. + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + * # => [[ 1 2 3 4] + * # [-1 -2 -3 -4]] + * + * # Select all rows, two segments. + * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + * # => [[0 0 0 0] + * # [5 6 7 8]] + * + * # Which is equivalent to: + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
+ * @param data type for `SparseSegmentSum` output and operands + * @return a new instance of SparseSegmentSum + * @see org.tensorflow.op.SparseOps.sparseSegmentSum + */ + public fun sparseSegmentSum( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSum = java.sparseSegmentSum( + data, + indices, + segmentIds + ) + + /** + * Computes gradients for SparseSegmentSum. + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param data type for `output` output + * @param grad gradient propagated to the SparseSegmentSum op. + * @param indices indices passed to the corresponding SparseSegmentSum op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentSum op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSum op. + * @param data type for `SparseSegmentSumGrad` output and operands + * @return a new instance of SparseSegmentSumGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentSumGrad + */ + public fun sparseSegmentSumGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSumGrad = java.sparseSegmentSumGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + /** + * Computes the sum along sparse segments of a tensor. + * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + * for an explanation of segments. 
+ * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * tf.sparse_segment_sum_with_num_segments( + * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + * # => [[0 0 0 0] + * # [0 0 0 0] + * # [0 0 0 0]] + * + * tf.sparse_segment_sum_with_num_segments(c, + * tf.constant([0, 1]), + * tf.constant([0, 2], + * num_segments=4)) + * # => [[ 1 2 3 4] + * # [ 0 0 0 0] + * # [-1 -2 -3 -4] + * # [ 0 0 0 0]] + * + * ``` + * + * @param data type for `output` output + * @param data The data value + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param numSegments Should equal the number of distinct segment IDs. + * @param data type for `SparseSegmentSumWithNumSegments` output and operands + * @return a new instance of SparseSegmentSumWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentSumWithNumSegments + */ + public fun sparseSegmentSumWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + /** + * Slice a `SparseTensor` based on the `start` and `size`. + * For example, if the input is + * ``` + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] + * + * ``` + * + * Graphically the output tensors are: + * ``` + * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + * [ a ] + * [b c ] + * + * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + * [ d e ] + * [ ] + * + * ``` + * + * @param data type for `output_values` output + * @param indices 2-D tensor represents the indices of the sparse tensor. + * @param values 1-D tensor represents the values of the sparse tensor. + * @param shape 1-D. tensor represents the shape of the sparse tensor. + * @param start 1-D. tensor represents the start of the slice. + * @param sizeOutput 1-D. 
tensor represents the size of the slice. + * output indices: A list of 1-D tensors represents the indices of the output + * sparse tensors. + * @param data type for `SparseSlice` output and operands + * @return a new instance of SparseSlice + * @see org.tensorflow.op.SparseOps.sparseSlice + */ + public fun sparseSlice( + indices: Operand, + values: Operand, + shape: Operand, + start: Operand, + sizeOutput: Operand + ): SparseSlice = java.sparseSlice( + indices, + values, + shape, + start, + sizeOutput + ) + + /** + * The gradient operator for the SparseSlice op. + * This op takes in the upstream gradient w.r.t. non-empty values of + * the sliced `SparseTensor`, and outputs the gradients w.r.t. + * the non-empty values of input `SparseTensor`. + * + * @param data type for `val_grad` output + * @param backpropValGrad 1-D. The gradient with respect to + * the non-empty values of the sliced `SparseTensor`. + * @param inputIndices 2-D. The `indices` of the input `SparseTensor`. + * @param inputStart 1-D. tensor represents the start of the slice. + * @param outputIndices 2-D. The `indices` of the sliced `SparseTensor`. + * @param data type for `SparseSliceGrad` output and operands + * @return a new instance of SparseSliceGrad + * @see org.tensorflow.op.SparseOps.sparseSliceGrad + */ + public fun sparseSliceGrad( + backpropValGrad: Operand, + inputIndices: Operand, + inputStart: Operand, + outputIndices: Operand + ): SparseSliceGrad = java.sparseSliceGrad( + backpropValGrad, + inputIndices, + inputStart, + outputIndices + ) + + /** + * Applies softmax to a batched N-D `SparseTensor`. + * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` + * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. + * + * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost + * logical submatrix with shape `[B, C]`, but with the catch that _the implicitly + * zero elements do not participate_. 
Specifically, the algorithm is equivalent + * to the following: + * + * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix + * with shape `[B, C]`, along the size-C dimension; + * (2) Masks out the original implicitly-zero locations; + * (3) Renormalizes the remaining elements. + * + * Hence, the `SparseTensor` result has exactly the same non-zero indices and + * shape. + * + * @param data type for `output` output + * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a + * SparseTensor, in canonical ordering. + * @param spValues 1-D. `NNZ` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param data type for `SparseSoftmax` output and operands + * @return a new instance of SparseSoftmax + * @see org.tensorflow.op.SparseOps.sparseSoftmax + */ + public fun sparseSoftmax( + spIndices: Operand, + spValues: Operand, + spShape: Operand + ): SparseSoftmax = java.sparseSoftmax( + spIndices, + spValues, + spShape + ) + + /** + * Returns the element-wise max of two SparseTensors. + * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. + * + * @param data type for `output_values` output + * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, in the canonical lexicographic ordering. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aShape 1-D. Shape of the input SparseTensor. + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. 
+ * @param data type for `SparseSparseMaximum` output and operands + * @return a new instance of SparseSparseMaximum + * @see org.tensorflow.op.SparseOps.sparseSparseMaximum + */ + public fun sparseSparseMaximum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMaximum = java.sparseSparseMaximum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + /** + * Returns the element-wise min of two SparseTensors. + * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. + * + * @param data type for `output_values` output + * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, in the canonical lexicographic ordering. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aShape 1-D. Shape of the input SparseTensor. + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @param data type for `SparseSparseMinimum` output and operands + * @return a new instance of SparseSparseMinimum + * @see org.tensorflow.op.SparseOps.sparseSparseMinimum + */ + public fun sparseSparseMinimum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMinimum = java.sparseSparseMinimum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + /** + * Split a `SparseTensor` into `num_split` tensors along one dimension. + * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices + * `[0 : shape[split_dim] % num_split]` gets one extra dimension. 
+ * For example, if `split_dim = 1` and `num_split = 2` and the input is + * ``` + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] + * + * ``` + * + * Graphically the output tensors are: + * ``` + * output_tensor[0] = shape = [2, 4] + * [ a ] + * [b c ] + * + * output_tensor[1] = shape = [2, 3] + * [ d e ] + * [ ] + * + * ``` + * + * @param data type for `output_values` output + * @param splitDim 0-D. The dimension along which to split. Must be in the range + * `[0, rank(shape))`. + * @param indices 2-D tensor represents the indices of the sparse tensor. + * @param values 1-D tensor represents the values of the sparse tensor. + * @param shape 1-D. tensor represents the shape of the sparse tensor. + * output indices: A list of 1-D tensors represents the indices of the output + * sparse tensors. + * @param numSplit The number of ways to split. + * @param data type for `SparseSplit` output and operands + * @return a new instance of SparseSplit + * @see org.tensorflow.op.SparseOps.sparseSplit + */ + public fun sparseSplit( + splitDim: Operand, + indices: Operand, + values: Operand, + shape: Operand, + numSplit: Long + ): SparseSplit = java.sparseSplit( + splitDim, + indices, + values, + shape, + numSplit + ) + + /** + * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. + * This Op does not require `a_indices` be sorted in standard lexicographic order. + * + * @param data type for `output` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. + * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. + * @param aShape 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. + * @param b `ndims`-D Tensor. With shape `a_shape`. 
+ * @param data type for `SparseTensorDenseAdd` output and operands + * @param data type for `SparseTensorDenseAdd` output and operands + * @return a new instance of SparseTensorDenseAdd + * @see org.tensorflow.op.SparseOps.sparseTensorDenseAdd + */ + public fun sparseTensorDenseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + aIndices, + aValues, + aShape, + b + ) + + /** + * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". + * No validity checking is performed on the indices of A. However, the following + * input format is recommended for optimal behavior: + * + * if adjoint_a == false: + * A should be sorted in lexicographically increasing order. Use SparseReorder + * if you're not sure. + * if adjoint_a == true: + * A should be sorted in order of increasing dimension 1 (i.e., "column major" + * order instead of "row major" order). + * + * @param data type for `product` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. + * @param aValues 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. + * @param b 2-D. A dense Matrix. + * @param options carries optional attribute values + * @param data type for `SparseTensorDenseMatMul` output and operands + * @return a new instance of SparseTensorDenseMatMul + * @see org.tensorflow.op.SparseOps.sparseTensorDenseMatMul + * @param adjointA Sets the adjointA option. + * + * @param adjointA Use the adjoint of A in the matrix multiply. If A is complex, this + * is transpose(conj(A)). Otherwise it's transpose(A). + * @return this Options instance. + * @param adjointB Sets the adjointB option. + * + * @param adjointB Use the adjoint of B in the matrix multiply. If B is complex, this + * is transpose(conj(B)). Otherwise it's transpose(B). + * @return this Options instance. 
+ */ + public fun sparseTensorDenseMatMul( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand, + adjointA: Boolean? = null, + adjointB: Boolean? = null + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + aIndices, + aValues, + aShape, + b, + *listOfNotNull( + adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + ).toTypedArray() + ) + + /** + * Converts a sparse representation into a dense tensor. + * Builds an array `dense` with shape `output_shape` such that + * ``` + * # If sparse_indices is scalar + * dense[i] = (i == sparse_indices ? sparse_values : default_value) + * + * # If sparse_indices is a vector, then for each i + * dense[sparse_indices[i]] = sparse_values[i] + * + * # If sparse_indices is an n by d matrix, then for each i in [0, n) + * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] + * + * ``` + * + * All other values in `dense` are set to `default_value`. If `sparse_values` is a + * scalar, all sparse indices are set to this single value. + * + * Indices should be sorted in lexicographic order, and indices must not + * contain any repeats. If `validate_indices` is true, these properties + * are checked during execution. + * + * @param data type for `dense` output + * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + * index where `sparse_values[i]` will be placed. + * @param outputShape 1-D. Shape of the dense output tensor. + * @param sparseValues 1-D. Values corresponding to each row of `sparse_indices`, + * or a scalar value to be used for all sparse indices. + * @param defaultValue Scalar value to set for indices not specified in + * `sparse_indices`. 
+ * @param options carries optional attribute values + * @param data type for `SparseToDense` output and operands + * @param data type for `SparseToDense` output and operands + * @return a new instance of SparseToDense + * @see org.tensorflow.op.SparseOps.sparseToDense + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices If true, indices are checked to make sure they are sorted in + * lexicographic order and that there are no repeats. + * @return this Options instance. + */ + public fun sparseToDense( + sparseIndices: Operand, + outputShape: Operand, + sparseValues: Operand, + defaultValue: Operand, + validateIndices: Boolean? = null + ): SparseToDense = java.sparseToDense( + sparseIndices, + outputShape, + sparseValues, + defaultValue, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + ).toTypedArray() + ) + + /** + * Applies set operation along last dimension of 2 `SparseTensor` inputs. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the + * order and range of `set1` and `set2` indices. + * + * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, + * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same + * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set1` + * and `set2` indices. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. 
For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. + * @param set1Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set1Shape 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must + * be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the + * max set size across `0...n-1` dimensions. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the + * max set size across `0...n-1` dimensions. + * @param setOperation The value of the setOperation attribute + * @param options carries optional attribute values + * @param data type for `SparseToSparseSetOperation` output and operands + * @return a new instance of SparseToSparseSetOperation + * @see org.tensorflow.op.SparseOps.sparseToSparseSetOperation + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. + */ + public fun sparseToSparseSetOperation( + set1Indices: Operand, + set1Values: Operand, + set1Shape: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? 
= null + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + set1Indices, + set1Values, + set1Shape, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) + } + ).toTypedArray() + ) + + /** + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension on the left). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. 
+ * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. + * @param options carries optional attribute values + * @param data type for `TakeManySparseFromTensorsMap` output and operands + * @return a new instance of TakeManySparseFromTensorsMap + * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap + * @param container Sets the container option. + * + * @param container The container name for the `SparseTensorsMap` read by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. + * @return this Options instance. + */ + public fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + dtype: Class, + container: String? = null, + sharedName: String? = null + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + sparseHandles, + dtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + /** + * Deserialize `SparseTensor` objects. + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * the sparse tensors have been concatenated along new dimensions, one for each + * batch. 
+ * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension + * must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeSparse` output and operands + * @return a new instance of DeserializeSparse + * @see org.tensorflow.op.SparseOps.deserializeSparse + */ + @JvmName("deserializeSparseReified") + public inline fun deserializeSparse(serializedSparse: Operand): + DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) + + /** + * Extracts the average sparse gradient in a SparseConditionalAccumulator. + * The op will blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it will return its + * average of the accumulated gradients. Also automatically increments + * the recorded global_step in the accumulator by 1, and resets the + * aggregate to 0. 
+ * + * @param data type for `values` output + * @param handle The handle to a SparseConditionalAccumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @param data type for `SparseAccumulatorTakeGradient` output and operands + * @return a new instance of SparseAccumulatorTakeGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient + */ + @JvmName("sparseAccumulatorTakeGradientReified") + public inline fun sparseAccumulatorTakeGradient(handle: Operand, + numRequired: Operand): SparseAccumulatorTakeGradient = + sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) + + /** + * A conditional accumulator for aggregating sparse gradients. + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values. + * @param options carries optional attribute values + * @param data type for `SparseConditionalAccumulator` output and operands + * @return a new instance of SparseConditionalAccumulator + * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container Sets the container option. + * + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this accumulator will be shared under the given name + * across multiple sessions. + * @return this Options instance. 
+ * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. + */ + @JvmName("sparseConditionalAccumulatorReified") + public inline fun sparseConditionalAccumulator( + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? = null + ): SparseConditionalAccumulator = sparseConditionalAccumulator(T::class.java, shape, + container, sharedName, reductionType) + + /** + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension on the left). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. 
+ * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. + * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. + * @param options carries optional attribute values + * @param data type for `TakeManySparseFromTensorsMap` output and operands + * @return a new instance of TakeManySparseFromTensorsMap + * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap + * @param container Sets the container option. + * + * @param container The container name for the `SparseTensorsMap` read by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. + * @return this Options instance. + */ + @JvmName("takeManySparseFromTensorsMapReified") + public inline fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + container: String? = null, + sharedName: String? 
= null + ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap(sparseHandles, + T::class.java, container, sharedName) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt new file mode 100644 index 00000000000..1447651c42b --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -0,0 +1,911 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.strings.Join +import org.tensorflow.op.strings.Lower +import org.tensorflow.op.strings.ReduceJoin +import org.tensorflow.op.strings.RegexFullMatch +import org.tensorflow.op.strings.RegexReplace +import org.tensorflow.op.strings.StringFormat +import org.tensorflow.op.strings.StringLength +import org.tensorflow.op.strings.StringNGrams +import org.tensorflow.op.strings.StringSplit +import org.tensorflow.op.strings.Strip +import org.tensorflow.op.strings.Substr +import org.tensorflow.op.strings.ToHashBucket +import org.tensorflow.op.strings.ToHashBucketFast +import org.tensorflow.op.strings.ToHashBucketStrong +import org.tensorflow.op.strings.ToNumber +import org.tensorflow.op.strings.UnicodeScript +import org.tensorflow.op.strings.UnicodeTranscode +import org.tensorflow.op.strings.UnsortedSegmentJoin +import org.tensorflow.op.strings.Upper +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber + +/** + * An API for building `strings` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class StringsOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.StringsOps = ops.java.strings + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Joins the strings in the given list of string tensors into one tensor; + * with the given separator (default is an empty separator). + * + * Examples: + * ``` + * + * s = ["hello", "world", "tensorflow"] + * tf.strings.join(s, " ") + * + * ``` + * + * @param inputs A list of string tensors. The tensors must all have the same shape, + * or be scalars. 
Scalars may be mixed in; these will be broadcast to the shape
 + * of non-scalar inputs.
 + * @param options carries optional attribute values
 + * @return a new instance of Join
 + * @see org.tensorflow.op.StringsOps.join
 + * @param separator Sets the separator option.
 + *
 + * @param separator string, an optional join separator.
 + * @return this Options instance.
 + */
 + public fun join(inputs: Iterable>, separator: String? = null): Join =
 + java.join(
 + inputs,
 + *listOfNotNull(
 + separator?.let{ org.tensorflow.op.strings.Join.separator(it) }
 + ).toTypedArray()
 + )
 +
 + /**
 + * Converts all uppercase characters into their respective lowercase replacements.
 + * Example:
 + * ```
 + *
 + * tf.strings.lower("CamelCase string and ALL CAPS")
 + *
 + * ```
 + *
 + * @param input The input to be lower-cased.
 + * @param options carries optional attribute values
 + * @return a new instance of Lower
 + * @see org.tensorflow.op.StringsOps.lower
 + * @param encoding Sets the encoding option.
 + *
 + * @param encoding Character encoding of `input`. Allowed values are '' and 'utf-8'.
 + * Value '' is interpreted as ASCII.
 + * @return this Options instance.
 + */
 + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower(
 + input,
 + *listOfNotNull(
 + encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) }
 + ).toTypedArray()
 + )
 +
 + /**
 + * Joins a string Tensor across the given dimensions.
 + * Computes the string join across dimensions in the given string Tensor of shape
 + * `[d_0, d_1, ..., d_{n-1}]`. Returns a new Tensor created by joining the input
 + * strings with the given separator (default: empty string). Negative indices are
 + * counted backwards from the end, with `-1` being equivalent to `n - 1`. If
 + * indices are not specified, joins across all dimensions beginning from `n - 1`
 + * through `0`. 
+ * + * For example: + * ``` + * # tensor `a` is [["a", "b"], ["c", "d"]] + * tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + * tf.reduce_join(a, [0, 1]) ==> "acbd" + * tf.reduce_join(a, [1, 0]) ==> "abcd" + * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + * + * ``` + * + * @param inputs The input to be joined. All reduced indices must have non-zero size. + * @param reductionIndices The dimensions to reduce over. Dimensions are reduced in the + * order specified. Omitting `reduction_indices` is equivalent to passing + * `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. + * @param options carries optional attribute values + * @return a new instance of ReduceJoin + * @see org.tensorflow.op.StringsOps.reduceJoin + * @param keepDims Sets the keepDims option. + * + * @param keepDims If `True`, retain reduced dimensions with length `1`. + * @return this Options instance. + * @param separator Sets the separator option. + * + * @param separator The separator to use when joining. + * @return this Options instance. + */ + public fun reduceJoin( + inputs: Operand, + reductionIndices: Operand, + keepDims: Boolean? = null, + separator: String? = null + ): ReduceJoin = java.reduceJoin( + inputs, + reductionIndices, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } + ).toTypedArray() + ) + + /** + * Check if the input matches the regex pattern. + * The input is a string tensor of any shape. 
The pattern is a scalar + * string tensor which is applied to every element of the input tensor. + * The boolean values (True or False) of the output tensor indicate + * if the input matches the regex pattern provided. + * + * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * Examples: + * ``` + * + * tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") + * + * tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") + * + * ``` + * + * @param input A string tensor of the text to be processed. + * @param pattern A scalar string tensor containing the regular expression to match the input. + * @return a new instance of RegexFullMatch + * @see org.tensorflow.op.StringsOps.regexFullMatch + */ + public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = + java.regexFullMatch( + input, + pattern + ) + + /** + * Replaces matches of the `pattern` regular expression in `input` with the + * replacement string provided in `rewrite`. + * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * @param input The text to be processed. + * @param pattern The regular expression to be matched in the `input` strings. + * @param rewrite The rewrite string to be substituted for the `pattern` expression where it is + * matched in the `input` strings. + * @param options carries optional attribute values + * @return a new instance of RegexReplace + * @see org.tensorflow.op.StringsOps.regexReplace + * @param replaceGlobal Sets the replaceGlobal option. + * + * @param replaceGlobal If True, the replacement is global (that is, all matches of the + * `pattern` regular + * expression in each input string are rewritten), otherwise the `rewrite` + * substitution is only made for the first `pattern` match. + * @return this Options instance. + */ + public fun regexReplace( + input: Operand, + pattern: Operand, + rewrite: Operand, + replaceGlobal: Boolean? 
= null + ): RegexReplace = java.regexReplace( + input, + pattern, + rewrite, + *listOfNotNull( + replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + ).toTypedArray() + ) + + /** + * Formats a string template using a list of tensors. + * Formats a string template using a list of tensors, pretty-printing tensor summaries. + * + * @param inputs The list of tensors to format into the placeholder string. + * @param options carries optional attribute values + * @return a new instance of StringFormat + * @see org.tensorflow.op.StringsOps.stringFormat + * @param template Sets the template option. + * + * @param template A string, the template to format tensor summaries into. + * @return this Options instance. + * @param placeholder Sets the placeholder option. + * + * @param placeholder A string, at each placeholder in the template a subsequent tensor summary + * will be inserted. + * @return this Options instance. + * @param summarize Sets the summarize option. + * + * @param summarize When formatting the tensor summaries print the first and last summarize + * entries of each tensor dimension. + * @return this Options instance. + */ + public fun stringFormat( + inputs: Iterable>, + template: String? = null, + placeholder: String? = null, + summarize: Long? = null + ): StringFormat = java.stringFormat( + inputs, + *listOfNotNull( + template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } + ).toTypedArray() + ) + + /** + * String lengths of `input`. + * Computes the length of each string given in the input tensor. 
+ * ``` + * + * strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + * tf.strings.length(strings).numpy() # default counts bytes + * array([ 5, 10, 4], dtype=int32) + * tf.strings.length(strings, unit="UTF8_CHAR").numpy() + * array([ 5, 10, 1], dtype=int32) + * ``` + * + * @param input The strings for which to compute the length for each element. + * @param options carries optional attribute values + * @return a new instance of StringLength + * @see org.tensorflow.op.StringsOps.stringLength + * @param unit Sets the unit option. + * + * @param unit The unit that is counted to compute string length. One of: `"BYTE"` (for + * the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 + * encoded Unicode code points in each string). Results are undefined + * if `unit=UTF8_CHAR` and the `input` strings do not contain structurally + * valid UTF-8. + * @return this Options instance. + */ + public fun stringLength(input: Operand, unit: String? = null): StringLength = + java.stringLength( + input, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() + ) + + /** + * Creates ngrams from ragged string data. + * This op accepts a ragged tensor with 1 ragged dimension containing only + * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams + * of that string, joined along the innermost axis. + * + * @param data type for `ngrams_splits` output + * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a + * 1D string tensor. + * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. + * @param separator The string to append between elements of the token. Use "" for no + * separator. + * @param ngramWidths The sizes of the ngrams to create. + * @param leftPad The string to use to pad the left side of the ngram sequence. Only used if + * pad_width != 0. 
 + * @param rightPad The string to use to pad the right side of the ngram sequence. Only used if
 + * pad_width != 0.
 + * @param padWidth The number of padding elements to add to each side of each
 + * sequence. Note that padding will never be greater than 'ngram_widths'-1
 + * regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
 + * elements.
 + * @param preserveShortSequences The value of the preserveShortSequences attribute
 + * @param data type for `StringNGrams` output and operands
 + * @return a new instance of StringNGrams
 + * @see org.tensorflow.op.StringsOps.stringNGrams
 + */
 + public fun stringNGrams(
 + `data`: Operand,
 + dataSplits: Operand,
 + separator: String,
 + ngramWidths: List,
 + leftPad: String,
 + rightPad: String,
 + padWidth: Long,
 + preserveShortSequences: Boolean
 + ): StringNGrams = java.stringNGrams(
 + data,
 + dataSplits,
 + separator,
 + ngramWidths,
 + leftPad,
 + rightPad,
 + padWidth,
 + preserveShortSequences
 + )
 +
 + /**
 + * Split elements of `source` based on `sep` into a `SparseTensor`.
 + * Let N be the size of source (typically N will be the batch size). Split each
 + * element of `source` based on `sep` and return a `SparseTensor`
 + * containing the split tokens. Empty tokens are ignored.
 + *
 + * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
 + * then the output will be
 + * ```
 + * st.indices = [0, 0;
 + * 0, 1;
 + * 1, 0;
 + * 1, 1;
 + * 1, 2]
 + * st.shape = [2, 3]
 + * st.values = ['hello', 'world', 'a', 'b', 'c']
 + *
 + * ```
 + *
 + * If `sep` is given, consecutive delimiters are not grouped together and are
 + * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
 + * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
 + * string, consecutive whitespace are regarded as a single separator, and the
 + * result will contain no empty strings at the start or end if the string has
 + * leading or trailing whitespace. 
+ * + * Note that the above mentioned behavior matches python's str.split. + * + * @param input `1-D` string `Tensor`, the strings to split. + * @param sep `0-D` string `Tensor`, the delimiter character. + * @param options carries optional attribute values + * @return a new instance of StringSplit + * @see org.tensorflow.op.StringsOps.stringSplit + * @param maxsplit Sets the maxsplit option. + * + * @param maxsplit An `int`. If `maxsplit > 0`, limit of the split of the result. + * @return this Options instance. + */ + public fun stringSplit( + input: Operand, + sep: Operand, + maxsplit: Long? = null + ): StringSplit = java.stringSplit( + input, + sep, + *listOfNotNull( + maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } + ).toTypedArray() + ) + + /** + * Strip leading and trailing whitespaces from the Tensor. + * Examples: + * ``` + * + * tf.strings.strip(["\nTensorFlow", " The python library "]).numpy() + * array([b'TensorFlow', b'The python library'], dtype=object) + * ``` + * + * @param input A string `Tensor` of any shape. + * @return a new instance of Strip + * @see org.tensorflow.op.StringsOps.strip + */ + public fun strip(input: Operand): Strip = java.strip( + input + ) + + /** + * Return substrings from `Tensor` of strings. + * For each string in the input `Tensor`, creates a substring starting at index + * `pos` with a total length of `len`. + * + * If `len` defines a substring that would extend beyond the length of the input + * string, or if `len` is negative, then as many characters as possible are used. + * + * A negative `pos` indicates distance within the string backwards from the end. + * + * If `pos` specifies an index which is out of range for any of the input strings, + * then an `InvalidArgumentError` is thrown. + * + * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on + * Op creation. + * + * _NOTE_: `strings.Substr` supports broadcasting up to two dimensions. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + *
+ * + * Examples + * + * Using scalar `pos` and `len`: + * ``` + * input = [b'Hello', b'World'] + * position = 1 + * length = 3 + * + * output = [b'ell', b'orl'] + * + * ``` + * + * Using `pos` and `len` with same shape as `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen']] + * position = [[1, 2, 3], + * [1, 2, 3], + * [1, 2, 3]] + * length = [[2, 3, 4], + * [4, 3, 2], + * [5, 5, 5]] + * + * output = [[b'en', b'eve', b'lve'], + * [b'hirt', b'urt', b'te'], + * [b'ixtee', b'vente', b'hteen']] + * + * ``` + * + * Broadcasting `pos` and `len` onto `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen'], + * [b'nineteen', b'twenty', b'twentyone']] + * position = [1, 2, 3] + * length = [1, 2, 3] + * + * output = [[b'e', b'ev', b'lve'], + * [b'h', b'ur', b'tee'], + * [b'i', b've', b'hte'], + * [b'i', b'en', b'nty']] + * + * ``` + * + * Broadcasting `input` onto `pos` and `len`: + * ``` + * input = b'thirteen' + * position = [1, 5, 7] + * length = [3, 2, 1] + * + * output = [b'hir', b'ee', b'n'] + * + * ``` + * + * Raises: + *
+ * <ul>
+ * <li>`ValueError`: If the first argument cannot be converted to a
+ * Tensor of `dtype string`.</li>
+ * <li>`InvalidArgumentError`: If indices are out of range.</li>
+ * <li>`ValueError`: If `pos` and `len` are not the same shape.</li>
+ * </ul>
+ * + * @param input Tensor of strings + * @param pos Scalar defining the position of first character in each substring + * @param len Scalar defining the number of characters to include in each substring + * @param options carries optional attribute values + * @param data type for `Substr` output and operands + * @return a new instance of Substr + * @see org.tensorflow.op.StringsOps.substr + * @param unit Sets the unit option. + * + * @param unit The unit that is used to create the substring. One of: `"BYTE"` (for + * defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 + * encoded Unicode code points). The default is `"BYTE"`. Results are undefined if + * `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid + * UTF-8. + * @return this Options instance. + */ + public fun substr( + input: Operand, + pos: Operand, + len: Operand, + unit: String? = null + ): Substr = java.substr( + input, + pos, + len, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } + ).toTypedArray() + ) + + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * The hash function is deterministic on the content of the string within the + * process. + * + * Note that the hash function may change from time to time. + * This functionality will be deprecated and it's recommended to use + * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. + * + * @param stringTensor The stringTensor value + * @param numBuckets The number of buckets. + * @return a new instance of ToHashBucket + * @see org.tensorflow.op.StringsOps.toHashBucket + */ + public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = + java.toHashBucket( + stringTensor, + numBuckets + ) + + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * The hash function is deterministic on the content of the string within the + * process and will never change. 
However, it is not suitable for cryptography. + * This function may be used when CPU time is scarce and inputs are trusted or + * unimportant. There is a risk of adversaries constructing inputs that all hash + * to the same bucket. To prevent this problem, use a strong hash function with + * `tf.string_to_hash_bucket_strong`. + * + * Examples: + * ``` + * + * tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], + * 3).numpy() + * array([0, 2, 2]) + * ``` + * + * @param input The strings to assign a hash bucket. + * @param numBuckets The number of buckets. + * @return a new instance of ToHashBucketFast + * @see org.tensorflow.op.StringsOps.toHashBucketFast + */ + public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = + java.toHashBucketFast( + input, + numBuckets + ) + + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * The hash function is deterministic on the content of the string within the + * process. The hash function is a keyed hash function, where attribute `key` + * defines the key of the hash function. `key` is an array of 2 elements. + * + * A strong hash is important when inputs may be malicious, e.g. URLs with + * additional components. Adversaries could try to make their inputs hash to the + * same bucket for a denial-of-service attack or to skew the results. A strong + * hash can be used to make it difficult to find inputs with a skewed hash value + * distribution over buckets. This requires that the hash function is + * seeded by a high-entropy (random) "key" unknown to the adversary. + * + * The additional robustness comes at a cost of roughly 4x higher compute + * time than `tf.string_to_hash_bucket_fast`. + * + * Examples: + * ``` + * + * tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + * array([2, 0]) + * ``` + * + * @param input The strings to assign a hash bucket. + * @param numBuckets The number of buckets. 
+ * @param key The key used to seed the hash function, passed as a list of two uint64 + * elements. + * @return a new instance of ToHashBucketStrong + * @see org.tensorflow.op.StringsOps.toHashBucketStrong + */ + public fun toHashBucketStrong( + input: Operand, + numBuckets: Long, + key: List + ): ToHashBucketStrong = java.toHashBucketStrong( + input, + numBuckets, + key + ) + + /** + * Converts each string in the input Tensor to the specified numeric type. + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * + * ``` + * + * @param data type for `output` output + * @param stringTensor The stringTensor value + * @return a new instance of ToNumber, with default output types + * @see org.tensorflow.op.StringsOps.toNumber + */ + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + stringTensor + ) + + /** + * Converts each string in the input Tensor to the specified numeric type. + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * + * ``` + * + * @param data type for `output` output + * @param stringTensor The stringTensor value + * @param outType The numeric type to interpret each string in `string_tensor` as. + * @param data type for `StringToNumber` output and operands + * @return a new instance of ToNumber + * @see org.tensorflow.op.StringsOps.toNumber + */ + public fun toNumber(stringTensor: Operand, outType: Class): + ToNumber = java.toNumber( + stringTensor, + outType + ) + + /** + * Determine the script codes of a given tensor of Unicode integer code points. + * This operation converts Unicode code points to script codes corresponding to + * each code point. 
Script codes correspond to International Components for + * Unicode (ICU) UScriptCode values. + * + * See[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) + * for more details on script codes. + * + * For an example, see the unicode strings guide on [unicode scripts] + * (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). + * + * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will + * match input shape. + * + * Examples: + * ``` + * + * tf.strings.unicode_script([1, 31, 38]) + * + * ``` + * + * @param input A Tensor of int32 Unicode code points. + * @return a new instance of UnicodeScript + * @see org.tensorflow.op.StringsOps.unicodeScript + */ + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + input + ) + + /** + * Transcode the input text from a source encoding to a destination encoding. + * The input is a string tensor of any shape. The output is a string tensor of + * the same shape containing the transcoded strings. Output strings are always + * valid unicode. If the input contains invalid encoding positions, the + * `errors` attribute sets the policy for how to deal with them. If the default + * error-handling policy is used, invalid formatting will be substituted in the + * output by the `replacement_char`. If the errors policy is to `ignore`, any + * invalid encoding positions in the input are skipped and not included in the + * output. If it set to `strict` then any invalid formatting will result in an + * InvalidArgument error. + * + * This operation can be used with `output_encoding = input_encoding` to enforce + * correct formatting for inputs even if they are already in the desired encoding. + * + * If the input is prefixed by a Byte Order Mark needed to determine encoding + * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that + * BOM will be consumed and not emitted into the output. 
If the input encoding + * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is + * interpreted as a non-breaking-space and is preserved in the output (including + * always for UTF-8). + * + * The end result is that if the input is marked as an explicit endianness the + * transcoding is faithful to all codepoints in the source. If it is not marked + * with an explicit endianness, the BOM is not considered part of the string itself + * but as metadata, and so is not preserved in the output. + * + * Examples: + * ``` + * + * tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], + * "UTF-8", "UTF-16-BE") + * + * tf.strings.unicode_transcode(["A", "B", "C"], "US + * ASCII", "UTF-8").numpy() + * array([b'A', b'B', b'C'], dtype=object) + * ``` + * + * @param input The text to be processed. Can have any shape. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings + * supported + * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * @param outputEncoding The unicode encoding to use in the output. Must be one of + * `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. + * @param options carries optional attribute values + * @return a new instance of UnicodeTranscode + * @see org.tensorflow.op.StringsOps.unicodeTranscode + * @param errors Sets the errors option. + * + * @param errors Error handling policy when there is invalid formatting found in the input. + * The value of 'strict' will cause the operation to produce a InvalidArgument + * error on any invalid input formatting. A value of 'replace' (the default) will + * cause the operation to replace any invalid formatting in the input with the + * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * skip any invalid formatting in the input and produce no corresponding output + * character. + * @return this Options instance. 
+ * @param replacementChar Sets the replacementChar option. + * + * @param replacementChar The replacement character codepoint to be used in place of any invalid + * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * be used. The default value is the default unicode replacement character is + * 0xFFFD or U+65533.) + * + * Note that for UTF-8, passing a replacement character expressible in 1 byte, such + * as ' ', will preserve string alignment to the source since invalid bytes will be + * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte + * replacement character will preserve byte alignment to the source. + * @return this Options instance. + * @param replaceControlCharacters Sets the replaceControlCharacters option. + * + * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with the + * `replacement_char`. Default is false. + * @return this Options instance. + */ + public fun unicodeTranscode( + input: Operand, + inputEncoding: String, + outputEncoding: String, + errors: String? = null, + replacementChar: Long? = null, + replaceControlCharacters: Boolean? = null + ): UnicodeTranscode = java.unicodeTranscode( + input, + inputEncoding, + outputEncoding, + *listOfNotNull( + errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let{ + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } + ).toTypedArray() + ) + + /** + * Joins the elements of `inputs` based on `segment_ids`. + * Computes the string join along segments of a tensor. + * Given `segment_ids` with rank `N` and `data` with rank `N+M`: + * ``` + * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` + * + * ``` + * + * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. + * Strings are joined in row-major order. 
+ * + * For example: + * ``` + * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] + * output_array = string_ops.unsorted_segment_join(inputs=inputs, + * segment_ids=[1, 0, 1], + * num_segments=2, + * separator=':')) + * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] + * + * + * inputs = ['this', 'is', 'a', 'test'] + * output_array = string_ops.unsorted_segment_join(inputs=inputs, + * segment_ids=[0, 0, 0, 0], + * num_segments=1, + * separator=':')) + * # output_array ==> ['this:is:a:test'] + * + * ``` + * + * @param inputs The input to be joined. + * @param segmentIds A tensor whose shape is a prefix of data.shape. Negative segment ids are + * not + * supported. + * @param numSegments A scalar. + * @param options carries optional attribute values + * @return a new instance of UnsortedSegmentJoin + * @see org.tensorflow.op.StringsOps.unsortedSegmentJoin + * @param separator Sets the separator option. + * + * @param separator The separator to use when joining. + * @return this Options instance. + */ + public fun unsortedSegmentJoin( + inputs: Operand, + segmentIds: Operand, + numSegments: Operand, + separator: String? = null + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + inputs, + segmentIds, + numSegments, + *listOfNotNull( + separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + ).toTypedArray() + ) + + /** + * Converts all lowercase characters into their respective uppercase replacements. + * Example: + * ``` + * + * tf.strings.upper("CamelCase string and ALL CAPS") + * + * ``` + * + * @param input The input to be upper-cased. + * @param options carries optional attribute values + * @return a new instance of Upper + * @see org.tensorflow.op.StringsOps.upper + * @param encoding Sets the encoding option. + * + * @param encoding Character encoding of `input`. Allowed values are '' and 'utf-8'. + * Value '' is interpreted as ASCII. + * @return this Options instance. 
+ */ + public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( + input, + *listOfNotNull( + encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } + ).toTypedArray() + ) + + /** + * Converts each string in the input Tensor to the specified numeric type. + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * + * ``` + * + * @param data type for `output` output + * @param stringTensor The stringTensor value + * @param outType The numeric type to interpret each string in `string_tensor` as. + * @param data type for `StringToNumber` output and operands + * @return a new instance of ToNumber + * @see org.tensorflow.op.StringsOps.toNumber + */ + @JvmName("toNumberReified") + public inline fun toNumberTyped(stringTensor: Operand): + ToNumber = toNumber(stringTensor, T::class.java) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt new file mode 100644 index 00000000000..8d538de8d15 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -0,0 +1,239 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Long +import org.tensorflow.Operand +import org.tensorflow.Tensor +import org.tensorflow.op.Scope +import org.tensorflow.op.summary.AudioSummary +import org.tensorflow.op.summary.HistogramSummary +import org.tensorflow.op.summary.ImageSummary +import org.tensorflow.op.summary.MergeSummary +import org.tensorflow.op.summary.ScalarSummary +import org.tensorflow.op.summary.TensorSummary +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class SummaryOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SummaryOps = ops.java.summary + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Outputs a `Summary` protocol buffer with audio. + * The summary has up to `max_outputs` summary values containing audio. The + * audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, + * channels]` or 2-D with shape `[batch_size, frames]`. The values are + * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. + * + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: + *
+ * <ul>
+ * <li>If `max_outputs` is 1, the summary value tag is '_tag_/audio'.</li>
+ * <li>If `max_outputs` is greater than 1, the summary value tags are
+ * generated sequentially as '_tag_/audio/0', '_tag_/audio/1', etc.</li>
+ * </ul>
+ * + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 2-D of shape `[batch_size, frames]`. + * @param sampleRate The sample rate of the signal in hertz. + * @param options carries optional attribute values + * @return a new instance of AudioSummary + * @see org.tensorflow.op.SummaryOps.audioSummary + * @param maxOutputs Sets the maxOutputs option. + * + * @param maxOutputs Max number of batch elements to generate audio for. + * @return this Options instance. + */ + public fun audioSummary( + tag: Operand, + tensor: Operand, + sampleRate: Operand, + maxOutputs: Long? = null + ): AudioSummary = java.audioSummary( + tag, + tensor, + sampleRate, + *listOfNotNull( + maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + ).toTypedArray() + ) + + /** + * Outputs a `Summary` protocol buffer with a histogram. + * The + * generated[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * has one summary value containing a histogram for `values`. + * + * This op reports an `InvalidArgument` error if any value is not finite. + * + * @param tag Scalar. Tag to use for the `Summary.Value`. + * @param values Any shape. Values to use to build the histogram. + * @return a new instance of HistogramSummary + * @see org.tensorflow.op.SummaryOps.histogramSummary + */ + public fun histogramSummary(tag: Operand, values: Operand): + HistogramSummary = java.histogramSummary( + tag, + values + ) + + /** + * Outputs a `Summary` protocol buffer with images. + * The summary has up to `max_images` summary values containing images. The + * images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, + * channels]` and where `channels` can be: + *
+ * <ul>
+ * <li>1: `tensor` is interpreted as Grayscale.</li>
+ * <li>3: `tensor` is interpreted as RGB.</li>
+ * <li>4: `tensor` is interpreted as RGBA.</li>
+ * </ul>
+ * + * The images have the same number of channels as the input tensor. For float + * input, the values are normalized one image at a time to fit in the range + * `[0, 255]`. `uint8` values are unchanged. The op uses two different + * normalization algorithms: + *
+ * <ul>
+ * <li>
+ * If the input values are all positive, they are rescaled so the largest one
+ * is 255.
+ * </li>
+ * <li>
+ * If any input value is negative, the values are shifted so input value 0.0
+ * is at 127. They are then rescaled so that either the smallest value is 0,
+ * or the largest one is 255.
+ * </li>
+ * </ul>
+ * + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: + *
+ * <ul>
+ * <li>If `max_images` is 1, the summary value tag is '_tag_/image'.</li>
+ * <li>If `max_images` is greater than 1, the summary value tags are
+ * generated sequentially as '_tag_/image/0', '_tag_/image/1', etc.</li>
+ * </ul>
+ * + * The `bad_color` argument is the color to use in the generated images for + * non-finite input values. It is a `uint8` 1-D tensor of length `channels`. + * Each element must be in the range `[0, 255]` (It represents the value of a + * pixel in the output image). Non-finite values in the input tensor are + * replaced by this tensor in the output image. The default value is the color + * red. + * + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 4-D of shape `[batch_size, height, width, channels]` where + * `channels` is 1, 3, or 4. + * @param options carries optional attribute values + * @return a new instance of ImageSummary + * @see org.tensorflow.op.SummaryOps.imageSummary + * @param maxImages Sets the maxImages option. + * + * @param maxImages Max number of batch elements to generate images for. + * @return this Options instance. + * @param badColor Sets the badColor option. + * + * @param badColor Color to use for pixels with non-finite values. + * @return this Options instance. + */ + public fun imageSummary( + tag: Operand, + tensor: Operand, + maxImages: Long? = null, + badColor: Tensor? = null + ): ImageSummary = java.imageSummary( + tag, + tensor, + *listOfNotNull( + maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } + ).toTypedArray() + ) + + /** + * Merges summaries. + * This op creates + * a[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * protocol buffer that contains the union of all the values in the input + * summaries. + * + * When the Op is run, it reports an `InvalidArgument` error if multiple values + * in the summaries to merge use the same tag. + * + * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol + * buffers. 
+ * @return a new instance of MergeSummary + * @see org.tensorflow.op.SummaryOps.mergeSummary + */ + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + inputs + ) + + /** + * Outputs a `Summary` protocol buffer with scalar values. + * The input `tags` and `values` must have the same shape. The generated summary + * has a summary value for each tag-value pair in `tags` and `values`. + * + * @param tags Tags for the summary. + * @param values Same shape as `tags. Values for the summary. + * @return a new instance of ScalarSummary + * @see org.tensorflow.op.SummaryOps.scalarSummary + */ + public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = + java.scalarSummary( + tags, + values + ) + + /** + * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. + * + * @param tag A string attached to this summary. Used for organization in TensorBoard. + * @param tensor A tensor to serialize. + * @param serializedSummaryMetadata A serialized SummaryMetadata proto. Contains plugin + * data. + * @return a new instance of TensorSummary + * @see org.tensorflow.op.SummaryOps.tensorSummary + */ + public fun tensorSummary( + tag: Operand, + tensor: Operand, + serializedSummaryMetadata: Operand + ): TensorSummary = java.tensorSummary( + tag, + tensor, + serializedSummaryMetadata + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt new file mode 100644 index 00000000000..e626cafa706 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -0,0 +1,211 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Long +import kotlin.String +import org.tensorflow.ConcreteFunction +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.tpu.Compile +import org.tensorflow.op.tpu.CompileSucceededAssert +import org.tensorflow.op.tpu.Execute +import org.tensorflow.op.tpu.ExecuteAndUpdateVariables +import org.tensorflow.op.tpu.PartitionedInput +import org.tensorflow.op.tpu.PartitionedOutput +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TType + +/** + * An API for building `tpu` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class TpuOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.TpuOps = ops.java.tpu + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Compiles a computations for execution on one or more TPU devices. + * For the internal use of the distributed TPU compiler. + * + * 'num_computations' is the number of computations to be compiled. + * 'function' is a function containing the computation to compile. + * 'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not + * known statically at TPUReplication rewrite time. 
+ * 'guaranteed_constants' is a list of tensors which have been guaranteed to not + * change their values during the session lifetime. These contain tensors marked as + * constant using the GuaranteeConstOp. + * 'metadata' is a serialized TPUCompileMetadataProto describing + * the shapes and types of the inputs to the computation, as well as a mapping onto + * the TPU pod topology. + * Each 'program' output is a string key that is passed to the _TPUExecute op and + * used to look up the program in the compilation cache. + * 'may_modify_variables' indicates whether variables may be modified. + * + * @param dynamicShapes The dynamicShapes value + * @param guaranteedConstants The guaranteedConstants value + * @param numComputations The value of the numComputations attribute + * @param function The value of the function attribute + * @param metadata The value of the metadata attribute + * @return a new instance of Compile + * @see org.tensorflow.op.TpuOps.compile + */ + public fun compile( + dynamicShapes: Iterable>, + guaranteedConstants: Iterable>, + numComputations: Long, + function: ConcreteFunction, + metadata: String + ): Compile = java.compile( + dynamicShapes, + guaranteedConstants, + numComputations, + function, + metadata + ) + + /** + * Asserts that compilation succeeded. + * This op produces no output and closes the device during failure to ensure all + * pending device interactions fail. + * + * 'compilation_status' is a serialized CompilationResultProto. + * + * @param compilationStatus The compilationStatus value + * @return a new instance of CompileSucceededAssert + * @see org.tensorflow.op.TpuOps.compileSucceededAssert + */ + public fun compileSucceededAssert(compilationStatus: Operand): CompileSucceededAssert = + java.compileSucceededAssert( + compilationStatus + ) + + /** + * Op that loads and executes a TPU program on a TPU device. + * For the internal use of the distributed TPU compiler. 
+ * + * @param args The args value + * @param key The key value + * @param Tresults The value of the Tresults attribute + * @return a new instance of Execute + * @see org.tensorflow.op.TpuOps.execute + */ + public fun execute( + args: Iterable>, + key: Operand, + Tresults: List> + ): Execute = java.execute( + args, + key, + Tresults + ) + + /** + * Op that executes a program with optional in-place variable updates. + * It (optionally) reads device variables, loads and executes a TPU program on a + * TPU device, and then (optionally) in-place updates variables using the program + * outputs, as specified in attributes device_var_reads_indices (program input + * indices from directly reading variables) and device_var_updates_indices (program + * output indices used to update variables, -1 means no-update/read-only). Such + * program outputs are consumed by these variables will not appear in the op + * output. For the internal use of the distributed TPU compiler. + * + * @param args The args value + * @param key The key value + * @param Tresults The value of the Tresults attribute + * @param deviceVarReadsIndices The value of the deviceVarReadsIndices attribute + * @param deviceVarUpdatesIndices The value of the deviceVarUpdatesIndices attribute + * @return a new instance of ExecuteAndUpdateVariables + * @see org.tensorflow.op.TpuOps.executeAndUpdateVariables + */ + public fun executeAndUpdateVariables( + args: Iterable>, + key: Operand, + Tresults: List>, + deviceVarReadsIndices: List, + deviceVarUpdatesIndices: List + ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( + args, + key, + Tresults, + deviceVarReadsIndices, + deviceVarUpdatesIndices + ) + + /** + * An op that groups a list of partitioned inputs together. This op + * + * @param data type for `output` output + * @param inputs A list of partitioned inputs which must have the same shape. 
+ * @param options carries optional attribute values + * @param data type for `TPUPartitionedInput` output and operands + * @return a new instance of PartitionedInput + * @see org.tensorflow.op.TpuOps.partitionedInput + * @param partitionDim Sets the partitionDim option. + * + * @param partitionDim An integer describles which dimension is partitioned. -1 means + * those inputs are replicated. + * @return this Options instance. + */ + public fun partitionedInput(inputs: Iterable>, partitionDim: Long? = + null): PartitionedInput = java.partitionedInput( + inputs, + *listOfNotNull( + partitionDim?.let{ org.tensorflow.op.tpu.PartitionedInput.partitionDim(it) } + ).toTypedArray() + ) + + /** + * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned + * outputs outside the XLA computation. + * + * @param data type for `output` output + * @param inputs A tensor which represents the full shape of partitioned tensors. + * @param numSplits The value of the numSplits attribute + * @param options carries optional attribute values + * @param data type for `TPUPartitionedOutput` output and operands + * @return a new instance of PartitionedOutput + * @see org.tensorflow.op.TpuOps.partitionedOutput + * @param partitionDim Sets the partitionDim option. + * + * @param partitionDim An integer describles which dimension is partitioned. + * @return this Options instance. + */ + public fun partitionedOutput( + inputs: Operand, + numSplits: Long, + partitionDim: Long? 
= null + ): PartitionedOutput = java.partitionedOutput( + inputs, + numSplits, + *listOfNotNull( + partitionDim?.let{ org.tensorflow.op.tpu.PartitionedOutput.partitionDim(it) } + ).toTypedArray() + ) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt new file mode 100644 index 00000000000..0820318b378 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -0,0 +1,3163 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.ConcreteFunction +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.train.AccumulatorApplyGradient +import org.tensorflow.op.train.AccumulatorNumAccumulated +import org.tensorflow.op.train.AccumulatorSetGlobalStep +import org.tensorflow.op.train.AccumulatorTakeGradient +import org.tensorflow.op.train.ApplyAdadelta +import org.tensorflow.op.train.ApplyAdagrad +import org.tensorflow.op.train.ApplyAdagradDa +import org.tensorflow.op.train.ApplyAdam +import org.tensorflow.op.train.ApplyAddSign +import org.tensorflow.op.train.ApplyCenteredRmsProp +import org.tensorflow.op.train.ApplyFtrl +import org.tensorflow.op.train.ApplyGradientDescent +import org.tensorflow.op.train.ApplyMomentum +import org.tensorflow.op.train.ApplyPowerSign +import org.tensorflow.op.train.ApplyProximalAdagrad +import org.tensorflow.op.train.ApplyProximalGradientDescent +import org.tensorflow.op.train.ApplyRmsProp +import org.tensorflow.op.train.BatchMatMul +import org.tensorflow.op.train.ConditionalAccumulator +import org.tensorflow.op.train.GenerateVocabRemapping +import org.tensorflow.op.train.MergeV2Checkpoints +import org.tensorflow.op.train.NegTrain +import org.tensorflow.op.train.PreventGradient +import org.tensorflow.op.train.ResourceApplyAdadelta +import org.tensorflow.op.train.ResourceApplyAdagradDa +import org.tensorflow.op.train.ResourceApplyAdam +import org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad +import org.tensorflow.op.train.ResourceApplyAddSign +import org.tensorflow.op.train.ResourceApplyCenteredRmsProp +import org.tensorflow.op.train.ResourceApplyFtrl +import org.tensorflow.op.train.ResourceApplyGradientDescent +import org.tensorflow.op.train.ResourceApplyKerasMomentum +import org.tensorflow.op.train.ResourceApplyMomentum 
+import org.tensorflow.op.train.ResourceApplyPowerSign +import org.tensorflow.op.train.ResourceApplyProximalAdagrad +import org.tensorflow.op.train.ResourceApplyProximalGradientDescent +import org.tensorflow.op.train.ResourceApplyRmsProp +import org.tensorflow.op.train.ResourceSparseApplyAdadelta +import org.tensorflow.op.train.ResourceSparseApplyAdagrad +import org.tensorflow.op.train.ResourceSparseApplyAdagradDa +import org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp +import org.tensorflow.op.train.ResourceSparseApplyFtrl +import org.tensorflow.op.train.ResourceSparseApplyKerasMomentum +import org.tensorflow.op.train.ResourceSparseApplyMomentum +import org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad +import org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent +import org.tensorflow.op.train.ResourceSparseApplyRmsProp +import org.tensorflow.op.train.Restore +import org.tensorflow.op.train.RestoreSlice +import org.tensorflow.op.train.Save +import org.tensorflow.op.train.SaveSlices +import org.tensorflow.op.train.SdcaFprint +import org.tensorflow.op.train.SdcaShrinkL1 +import org.tensorflow.op.train.SparseApplyAdadelta +import org.tensorflow.op.train.SparseApplyAdagradDa +import org.tensorflow.op.train.SparseApplyCenteredRmsProp +import org.tensorflow.op.train.SparseApplyFtrl +import org.tensorflow.op.train.SparseApplyMomentum +import org.tensorflow.op.train.SparseApplyProximalAdagrad +import org.tensorflow.op.train.SparseApplyProximalGradientDescent +import org.tensorflow.op.train.SparseApplyRmsProp +import org.tensorflow.op.train.SymbolicGradient +import org.tensorflow.op.train.TileGrad +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building `train` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops 
+ */ +public class TrainOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.TrainOps = ops.java.train + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Applies a gradient to a given accumulator. + * Does not add if local_step is lesser than the accumulator's global_step. + * + * @param handle The handle to a accumulator. + * @param localStep The local_step value at which the gradient was computed. + * @param gradient A tensor of the gradient to be accumulated. + * @return a new instance of AccumulatorApplyGradient + * @see org.tensorflow.op.TrainOps.accumulatorApplyGradient + */ + public fun accumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradient: Operand + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + handle, + localStep, + gradient + ) + + /** + * Returns the number of gradients aggregated in the given accumulators. + * + * @param handle The handle to an accumulator. + * @return a new instance of AccumulatorNumAccumulated + * @see org.tensorflow.op.TrainOps.accumulatorNumAccumulated + */ + public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = + java.accumulatorNumAccumulated( + handle + ) + + /** + * Updates the accumulator with a new value for global_step. + * Logs warning if the accumulator's value is already higher than + * new_global_step. + * + * @param handle The handle to an accumulator. + * @param newGlobalStep The new global_step value to set. + * @return a new instance of AccumulatorSetGlobalStep + * @see org.tensorflow.op.TrainOps.accumulatorSetGlobalStep + */ + public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + handle, + newGlobalStep + ) + + /** + * Extracts the average gradient in the given ConditionalAccumulator. 
+ * The op blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it returns the average of + * the accumulated gradients. Also automatically increments the recorded + * global_step in the accumulator by 1, and resets the aggregate to 0. + * + * @param data type for `average` output + * @param handle The handle to an accumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @param data type for `AccumulatorTakeGradient` output and operands + * @return a new instance of AccumulatorTakeGradient + * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient + */ + public fun accumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: Class + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + /** + * Update '*var' according to the adadelta scheme. + * accum = rho() * accum + (1 - rho()) * grad.square(); + * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + * update_accum = rho() * update_accum + (1 - rho()) * update.square(); + * var -= update; + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param accumUpdate Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyAdadelta` output and operands + * @return a new instance of ApplyAdadelta + * @see org.tensorflow.op.TrainOps.applyAdadelta + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If True, updating of the var, accum and update_accum tensors will be + * protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun applyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyAdadelta = java.applyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the adagrad scheme. + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyAdagrad` output and operands + * @return a new instance of ApplyAdagrad + * @see org.tensorflow.op.TrainOps.applyAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param updateSlots Sets the updateSlots option. + * + * @param updateSlots the updateSlots option + * @return this Options instance. + */ + public fun applyAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? 
= null + ): ApplyAdagrad = java.applyAdagrad( + `var`, + accum, + lr, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the proximal adagrad scheme. + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ApplyAdagradDA` output and operands + * @return a new instance of ApplyAdagradDa + * @see org.tensorflow.op.TrainOps.applyAdagradDa + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun applyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ApplyAdagradDa = java.applyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the Adam algorithm. 
+ * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - + * \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - + * m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyAdam` output and operands + * @return a new instance of ApplyAdam + * @see org.tensorflow.op.TrainOps.applyAdam + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, uses the nesterov update. + * @return this Options instance. + */ + public fun applyAdam( + `var`: Operand, + m: Operand, + v: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ApplyAdam = java.applyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param alpha Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyAddSign` output and operands + * @return a new instance of ApplyAddSign + * @see org.tensorflow.op.TrainOps.applyAddSign + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun applyAddSign( + `var`: Operand, + m: Operand, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyAddSign = java.applyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the centered RMSProp algorithm. + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. 
This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum Momentum Scale. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyCenteredRMSProp` output and operands + * @return a new instance of ApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.applyCenteredRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun applyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the Ftrl-proximal scheme. + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad * grad + * linear += grad_with_shrinkage - + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage The l2Shrinkage value + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ApplyFtrlV2` output and operands + * @return a new instance of ApplyFtrl + * @see org.tensorflow.op.TrainOps.applyFtrl + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. + */ + public fun applyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? 
= null, + multiplyLinearByLr: Boolean? = null + ): ApplyFtrl = java.applyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + /** + * Update '*var' by subtracting 'alpha' * 'delta' from it. + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param delta The change. + * @param options carries optional attribute values + * @param data type for `ApplyGradientDescent` output and operands + * @return a new instance of ApplyGradientDescent + * @see org.tensorflow.op.TrainOps.applyGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun applyGradientDescent( + `var`: Operand, + alpha: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyGradientDescent = java.applyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param momentum Momentum. Must be a scalar. 
+ * @param options carries optional attribute values + * @param data type for `ApplyMomentum` output and operands + * @return a new instance of ApplyMomentum + * @see org.tensorflow.op.TrainOps.applyMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + * @return this Options instance. + */ + public fun applyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ApplyMomentum = java.applyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param logbase Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyPowerSign` output and operands + * @return a new instance of ApplyPowerSign + * @see org.tensorflow.op.TrainOps.applyPowerSign + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun applyPowerSign( + `var`: Operand, + m: Operand, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyPowerSign = java.applyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + * accum += grad * grad + * prox_v = var - lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyProximalAdagrad` output and operands + * @return a new instance of ApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.applyProximalAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun applyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ApplyProximalAdagrad = java.applyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' as FOBOS algorithm with fixed learning rate. + * prox_v = var - alpha * delta + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param delta The change. + * @param options carries optional attribute values + * @param data type for `ApplyProximalGradientDescent` output and operands + * @return a new instance of ApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.applyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun applyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the RMSProp algorithm. + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. 
+ * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ApplyRMSProp` output and operands + * @return a new instance of ApplyRmsProp + * @see org.tensorflow.op.TrainOps.applyRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun applyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyRmsProp = java.applyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Multiplies slices of two tensors in batches. + * Multiplies all slices of `Tensor` `x` and `y` (each slice can be + * viewed as an element of a batch), and arranges the individual results + * in a single output tensor of the same batch size. 
Each of the + * individual slices can optionally be adjointed (to adjoint a matrix + * means to transpose and conjugate it) before multiplication by setting + * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + * + * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + * and `[..., r_y, c_y]`. + * + * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * ``` + * r_o = c_x if adj_x else r_x + * c_o = r_y if adj_y else c_y + * + * ``` + * + * It is computed as: + * ``` + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * + * ``` + * + * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . + * + * @param data type for `output` output + * @param x 2-D or higher with shape `[..., r_x, c_x]`. + * @param y 2-D or higher with shape `[..., r_y, c_y]`. + * @param Tout If not spcified, Tout is the same type to input type. + * @param options carries optional attribute values + * @param data type for `BatchMatMulV3` output and operands + * @return a new instance of BatchMatMul + * @see org.tensorflow.op.TrainOps.batchMatMul + * @param adjX Sets the adjX option. + * + * @param adjX If `True`, adjoint the slices of `x`. Defaults to `False`. + * @return this Options instance. + * @param adjY Sets the adjY option. + * + * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. + * @return this Options instance. + */ + public fun batchMatMul( + x: Operand, + y: Operand, + Tout: Class, + adjX: Boolean? = null, + adjY: Boolean? = null + ): BatchMatMul = java.batchMatMul( + x, + y, + Tout, + *listOfNotNull( + adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } + ).toTypedArray() + ) + + /** + * A conditional accumulator for aggregating gradients. 
+ * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param options carries optional attribute values + * @param data type for `ConditionalAccumulator` output and operands + * @return a new instance of ConditionalAccumulator + * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container Sets the container option. + * + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this accumulator will be shared under the + * given name across multiple sessions. + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. + */ + public fun conditionalAccumulator( + dtype: Class, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): ConditionalAccumulator = java.conditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + /** + * Given a path to new and old vocabulary files, returns a remapping Tensor of + * length `num_new_vocab`, where `remapping[i]` contains the row number in the old + * vocabulary that corresponds to row `i` in the new vocabulary (starting at line + * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` + * in the new vocabulary is not in the old vocabulary. The old vocabulary is + * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the + * default value of -1. + * + * `num_vocab_offset` enables + * use in the partitioned variable case, and should generally be set through + * examining partitioning info. The format of the files should be a text file, + * with each line containing a single entity within the vocabulary. + * + * For example, with `new_vocab_file` a text file containing each of the following + * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, + * f3], + * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be + * `[0, -1, 2]`. + * + * The op also returns a count of how many entries in the new vocabulary + * were present in the old vocabulary, which is used to calculate the number of + * values to initialize in a weight matrix remapping + * + * This functionality can be used to remap both row vocabularies (typically, + * features) and column vocabularies (typically, classes) from TensorFlow + * checkpoints. Note that the partitioning logic relies on contiguous vocabularies + * corresponding to div-partitioned variables. 
Moreover, the underlying remapping + * uses an IndexTable (as opposed to an inexact CuckooTable), so client code should + * use the corresponding index_table_from_file() as the FeatureColumn framework + * does (as opposed to tf.feature_to_id(), which uses a CuckooTable). + * + * @param newVocabFile Path to the new vocab file. + * @param oldVocabFile Path to the old vocab file. + * @param newVocabOffset How many entries into the new vocab file to start reading. + * @param numNewVocab Number of entries in the new vocab file to remap. + * @param options carries optional attribute values + * @return a new instance of GenerateVocabRemapping + * @see org.tensorflow.op.TrainOps.generateVocabRemapping + * @param oldVocabSize Sets the oldVocabSize option. + * + * @param oldVocabSize Number of entries in the old vocab file to consider. If -1, + * use the entire old vocabulary. + * @return this Options instance. + */ + public fun generateVocabRemapping( + newVocabFile: Operand, + oldVocabFile: Operand, + newVocabOffset: Long, + numNewVocab: Long, + oldVocabSize: Long? = null + ): GenerateVocabRemapping = java.generateVocabRemapping( + newVocabFile, + oldVocabFile, + newVocabOffset, + numNewVocab, + *listOfNotNull( + oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + ).toTypedArray() + ) + + /** + * V2 format specific: merges the metadata files of sharded checkpoints. The + * result is one logical checkpoint, with one physical metadata file and renamed + * data files. + * + * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. + * + * If delete_old_dirs is true, attempts to delete recursively the dirname of each + * path in the input checkpoint_prefixes. This is useful when those paths are non + * user-facing temporary locations. + * + * @param checkpointPrefixes prefixes of V2 checkpoints to merge. + * @param destinationPrefix scalar. The desired final prefix. 
Allowed to be the same + * as one of the checkpoint_prefixes. + * @param options carries optional attribute values + * @return a new instance of MergeV2Checkpoints + * @see org.tensorflow.op.TrainOps.mergeV2Checkpoints + * @param deleteOldDirs Sets the deleteOldDirs option. + * + * @param deleteOldDirs see above. + * @return this Options instance. + */ + public fun mergeV2Checkpoints( + checkpointPrefixes: Operand, + destinationPrefix: Operand, + deleteOldDirs: Boolean? = null + ): MergeV2Checkpoints = java.mergeV2Checkpoints( + checkpointPrefixes, + destinationPrefix, + *listOfNotNull( + deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + ).toTypedArray() + ) + + /** + * Training via negative sampling. + * + * @param wIn input word embedding. + * @param wOut output word embedding. + * @param examples A vector of word ids. + * @param labels A vector of word ids. + * @param lr The lr value + * @param vocabCount Count of words in the vocabulary. + * @param numNegativeSamples Number of negative samples per example. + * @return a new instance of NegTrain + * @see org.tensorflow.op.TrainOps.negTrain + */ + public fun negTrain( + wIn: Operand, + wOut: Operand, + examples: Operand, + labels: Operand, + lr: Operand, + vocabCount: List, + numNegativeSamples: Long + ): NegTrain = java.negTrain( + wIn, + wOut, + examples, + labels, + lr, + vocabCount, + numNegativeSamples + ) + + /** + * An identity op that triggers an error if a gradient is requested. + * When executed in a graph, this op outputs its input tensor as-is. + * + * When building ops to compute gradients, the TensorFlow gradient system + * will return an error when trying to lookup the gradient of this op, + * because no gradient must ever be registered for this function. This + * op exists to prevent subtle bugs from silently returning unimplemented + * gradients in some corner cases. + * + * @param data type for `output` output + * @param input any tensor. 
+ * @param options carries optional attribute values + * @param data type for `PreventGradient` output and operands + * @return a new instance of PreventGradient + * @see org.tensorflow.op.TrainOps.preventGradient + * @param message Sets the message option. + * + * @param message Will be printed in the error when anyone tries to differentiate + * this operation. + * @return this Options instance. + */ + public fun preventGradient(input: Operand, message: String? = null): + PreventGradient = java.preventGradient( + input, + *listOfNotNull( + message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the adadelta scheme. + * accum = rho() * accum + (1 - rho()) * grad.square(); + * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + * update_accum = rho() * update_accum + (1 - rho()) * update.square(); + * var -= update; + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param accumUpdate Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyAdadelta` output and operands + * @return a new instance of ResourceApplyAdadelta + * @see org.tensorflow.op.TrainOps.resourceApplyAdadelta + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var, accum and update_accum tensors will be + * protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the proximal adagrad scheme. + * + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceApplyAdagradDA` output and operands + * @return a new instance of ResourceApplyAdagradDa + * @see org.tensorflow.op.TrainOps.resourceApplyAdagradDa + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the Adam algorithm. 
+ * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - + * \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - + * m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyAdam` output and operands + * @return a new instance of ResourceApplyAdam + * @see org.tensorflow.op.TrainOps.resourceApplyAdam + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, uses the nesterov update. + * @return this Options instance. + */ + public fun resourceApplyAdam( + `var`: Operand, + m: Operand, + v: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceApplyAdam = java.resourceApplyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the Adam algorithm. + * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ + * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param vhat Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyAdamWithAmsgrad` output and operands + * @return a new instance of ResourceApplyAdamWithAmsgrad + * @see org.tensorflow.op.TrainOps.resourceApplyAdamWithAmsgrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. 
+ */ + public fun resourceApplyAdamWithAmsgrad( + `var`: Operand, + m: Operand, + v: Operand, + vhat: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + `var`, + m, + v, + vhat, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param alpha Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyAddSign` output and operands + * @return a new instance of ResourceApplyAddSign + * @see org.tensorflow.op.TrainOps.resourceApplyAddSign + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceApplyAddSign( + `var`: Operand, + m: Operand, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyAddSign = java.resourceApplyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the centered RMSProp algorithm. + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum Momentum Scale. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyCenteredRMSProp` output and operands + * @return a new instance of ResourceApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.resourceApplyCenteredRmsProp + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the Ftrl-proximal scheme. + * accum_new = accum + grad * grad + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * linear += grad_with_shrinkage + + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage The l2Shrinkage value + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceApplyFtrlV2` output and operands + * @return a new instance of ResourceApplyFtrl + * @see org.tensorflow.op.TrainOps.resourceApplyFtrl + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. + */ + public fun resourceApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ResourceApplyFtrl = java.resourceApplyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + /** + * Update '*var' by subtracting 'alpha' * 'delta' from it. + * + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param delta The change. + * @param options carries optional attribute values + * @param data type for `ResourceApplyGradientDescent` output and operands + * @return a new instance of ResourceApplyGradientDescent + * @see org.tensorflow.op.TrainOps.resourceApplyGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceApplyGradientDescent( + `var`: Operand, + alpha: Operand, + delta: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum - lr * grad + * var += accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceApplyKerasMomentum` output and operands + * @return a new instance of ResourceApplyKerasMomentum + * @see org.tensorflow.op.TrainOps.resourceApplyKerasMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var + momentum * accum, so in the end, the var you get is actually + * var + momentum * accum. + * @return this Options instance. + */ + public fun resourceApplyKerasMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceApplyMomentum` output and operands + * @return a new instance of ResourceApplyMomentum + * @see org.tensorflow.op.TrainOps.resourceApplyMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + * @return this Options instance. + */ + public fun resourceApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceApplyMomentum = java.resourceApplyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param logbase Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyPowerSign` output and operands + * @return a new instance of ResourceApplyPowerSign + * @see org.tensorflow.op.TrainOps.resourceApplyPowerSign + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceApplyPowerSign( + `var`: Operand, + m: Operand, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. 
+ * accum += grad * grad + * prox_v = var - lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attribute values + * @param data type for `ResourceApplyProximalAdagrad` output and operands + * @return a new instance of ResourceApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.resourceApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' as FOBOS algorithm with fixed learning rate. + * prox_v = var - alpha * delta + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} + * + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param delta The change. 
+ * @param options carries optional attribute values + * @param data type for `ResourceApplyProximalGradientDescent` output and operands + * @return a new instance of ResourceApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.resourceApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the RMSProp algorithm. + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. 
+ * @param options carries optional attribute values + * @param data type for `ResourceApplyRMSProp` output and operands + * @return a new instance of ResourceApplyRmsProp + * @see org.tensorflow.op.TrainOps.resourceApplyRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * var: Should be from a Variable(). + * + * @param var The var value + * @param accum Should be from a Variable(). + * @param accumUpdate : Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyAdadelta` output and operands + * @return a new instance of ResourceSparseApplyAdadelta + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdadelta + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. 
+ */ + public fun resourceSparseApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + * That is for rows we have grad for, we update var and accum as follows: + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyAdagrad` output and operands + * @return a new instance of ResourceSparseApplyAdagrad + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param updateSlots Sets the updateSlots option. + * + * @param updateSlots the updateSlots option + * @return this Options instance. + */ + public fun resourceSparseApplyAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? 
= null + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + `var`, + accum, + lr, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + /** + * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + * + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyAdagradDA` output and operands + * @return a new instance of ResourceSparseApplyAdagradDa + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagradDa + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceSparseApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? 
= null + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the centered RMSProp algorithm. + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. 
+ * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyCenteredRMSProp` output and operands + * @return a new instance of ResourceSparseApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.resourceSparseApplyCenteredRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceSparseApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' according to the Ftrl-proximal scheme. + * That is for rows we have grad for, we update var, accum and linear as follows: + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + * linear += grad_with_shrinkage + + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. 
+ * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage The l2Shrinkage value + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyFtrlV2` output and operands + * @return a new instance of ResourceSparseApplyFtrl + * @see org.tensorflow.op.TrainOps.resourceSparseApplyFtrl + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. + */ + public fun resourceSparseApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ + org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum - lr * grad + * var += accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. 
+ * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyKerasMomentum` output and operands + * @return a new instance of ResourceSparseApplyKerasMomentum + * @see org.tensorflow.op.TrainOps.resourceSparseApplyKerasMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var + momentum * accum, so in the end, the var you get is actually + * var + momentum * accum. + * @return this Options instance. + */ + public fun resourceSparseApplyKerasMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. 
+ * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyMomentum` output and operands + * @return a new instance of ResourceSparseApplyMomentum + * @see org.tensorflow.op.TrainOps.resourceSparseApplyMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + * @return this Options instance. + */ + public fun resourceSparseApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + * That is for rows we have grad for, we update var and accum as follows: + * accum += grad * grad + * prox_v = var + * prox_v -= lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. 
+ * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyProximalAdagrad` output and operands + * @return a new instance of ResourceSparseApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceSparseApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + /** + * Sparse update '*var' as FOBOS algorithm with fixed learning rate. + * That is for rows we have grad for, we update var as follows: + * prox_v = var - alpha * grad + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} + * + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. 
+ * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyProximalGradientDescent` output and operands + * @return a new instance of ResourceSparseApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun resourceSparseApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyProximalGradientDescent = + java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the RMSProp algorithm. + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. 
+ * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attribute values + * @param data type for `ResourceSparseApplyRMSProp` output and operands + * @return a new instance of ResourceSparseApplyRmsProp + * @see org.tensorflow.op.TrainOps.resourceSparseApplyRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun resourceSparseApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Restores tensors from a V2 checkpoint. + * For backward compatibility with the V1 format, this Op currently allows + * restoring from a V1 checkpoint as well: + *
    + *
  • This Op first attempts to find the V2 index file pointed to by "prefix", and + * if found proceed to read it as a V2 checkpoint;
  • + *
  • Otherwise the V1 read path is invoked. + * Relying on this behavior is not recommended, as the ability to fall back to read + * V1 might be deprecated and eventually removed.
  • + *
+ * + * By default, restores the named tensors in full. If the caller wishes to restore + * specific slices of stored tensors, "shape_and_slices" should be non-empty + * strings and correspondingly well-formed. + * + * Callers must ensure all the named tensors are indeed stored in the checkpoint. + * + * @param prefix Must have a single element. The prefix of a V2 checkpoint. + * @param tensorNames shape {N}. The names of the tensors to be restored. + * @param shapeAndSlices shape {N}. The slice specs of the tensors to be restored. + * Empty strings indicate that they are non-partitioned tensors. + * @param dtypes shape {N}. The list of expected dtype for the tensors. Must match + * those stored in the checkpoint. + * @return a new instance of Restore + * @see org.tensorflow.op.TrainOps.restore + */ + public fun restore( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + dtypes: List> + ): Restore = java.restore( + prefix, + tensorNames, + shapeAndSlices, + dtypes + ) + + /** + * Restores a tensor from checkpoint files. + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * larger tensor and the slice that the restored tensor covers. + * + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param data type for `tensor` output + * @param filePattern Must have a single element. The pattern of the files from + * which we read the tensor. + * @param tensorName Must have a single element. The name of the tensor to be + * restored. + * @param shapeAndSlice Scalar. The shapes and slice specifications to use when + * restoring a tensors. + * @param dt The type of the tensor to be restored. 
+ * @param options carries optional attribute values + * @param data type for `RestoreSlice` output and operands + * @return a new instance of RestoreSlice + * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Sets the preferredShard option. + * + * @param preferredShard Index of file to open first if multiple files match + * `file_pattern`. See the documentation for `Restore`. + * @return this Options instance. + */ + public fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + dt: Class, + preferredShard: Long? = null + ): RestoreSlice = java.restoreSlice( + filePattern, + tensorName, + shapeAndSlice, + dt, + *listOfNotNull( + preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + ).toTypedArray() + ) + + /** + * Saves tensors in V2 checkpoint format. + * By default, saves the named tensors in full. If the caller wishes to save + * specific slices of full tensors, "shape_and_slices" should be non-empty strings + * and correspondingly well-formed. + * + * @param prefix Must have a single element. The prefix of the V2 checkpoint to which we + * write the tensors. + * @param tensorNames shape {N}. The names of the tensors to be saved. + * @param shapeAndSlices shape {N}. The slice specs of the tensors to be saved. + * Empty strings indicate that they are non-partitioned tensors. + * @param tensors `N` tensors to save. + * @return a new instance of Save + * @see org.tensorflow.op.TrainOps.save + */ + public fun save( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + tensors: Iterable> + ): Save = java.save( + prefix, + tensorNames, + shapeAndSlices, + tensors + ) + + /** + * Saves input tensors slices to disk. + * This is like `Save` except that tensors can be listed in the saved file as being + * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the + * larger tensor and the slice that this tensor covers. 
`shapes_and_slices` must + * have as many elements as `tensor_names`. + * + * Elements of the `shapes_and_slices` input must either be: + *
    + *
  • The empty string, in which case the corresponding tensor is + * saved normally.
  • + *
  • A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the + * `dimI` are the dimensions of the larger tensor and `slice-spec` + * specifies what part is covered by the tensor to save.
  • + *
+ * + * `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` + * where each `sliceI` is either: + *
    + *
  • The string `-` meaning that the slice covers all indices of this dimension
  • + *
  • `start,length` where `start` and `length` are integers. In that + * case the slice covers `length` indices starting at `start`.
  • + *
+ * + * See also `Save`. + * + * @param filename Must have a single element. The name of the file to which we write the + * tensor. + * @param tensorNames Shape `[N]`. The names of the tensors to be saved. + * @param shapesAndSlices Shape `[N]`. The shapes and slice specifications to use when + * saving the tensors. + * @param data `N` tensors to save. + * @return a new instance of SaveSlices + * @see org.tensorflow.op.TrainOps.saveSlices + */ + public fun saveSlices( + filename: Operand, + tensorNames: Operand, + shapesAndSlices: Operand, + `data`: Iterable> + ): SaveSlices = java.saveSlices( + filename, + tensorNames, + shapesAndSlices, + data + ) + + /** + * Computes fingerprints of the input strings. + * + * @param input vector of strings to compute fingerprints on. + * @return a new instance of SdcaFprint + * @see org.tensorflow.op.TrainOps.sdcaFprint + */ + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + input + ) + + /** + * Applies L1 regularization shrink step on the parameters. + * + * @param weights a list of vectors where each value is the weight associated with a + * feature group. + * @param l1 Symmetric l1 regularization strength. + * @param l2 Symmetric l2 regularization strength. Should be a positive float. + * @return a new instance of SdcaShrinkL1 + * @see org.tensorflow.op.TrainOps.sdcaShrinkL1 + */ + public fun sdcaShrinkL1( + weights: Iterable>, + l1: Float, + l2: Float + ): SdcaShrinkL1 = java.sdcaShrinkL1( + weights, + l1, + l2 + ) + + /** + * var: Should be from a Variable(). + * + * @param data type for `out` output + * @param var The var value + * @param accum Should be from a Variable(). + * @param accumUpdate : Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. 
+ * @param options carries optional attribute values + * @param data type for `SparseApplyAdadelta` output and operands + * @return a new instance of SparseApplyAdadelta + * @see org.tensorflow.op.TrainOps.sparseApplyAdadelta + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun sparseApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyAdadelta = java.sparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `SparseApplyAdagradDA` output and operands + * @return a new instance of SparseApplyAdagradDa + * @see org.tensorflow.op.TrainOps.sparseApplyAdagradDa + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun sparseApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the centered RMSProp algorithm. + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. 
Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attribute values + * @param data type for `SparseApplyCenteredRMSProp` output and operands + * @return a new instance of SparseApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.sparseApplyCenteredRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun sparseApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' according to the Ftrl-proximal scheme. + * That is for rows we have grad for, we update var, accum and linear as follows: + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad * grad + * linear += grad_with_shrinkage - + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). 
+ * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage The l2Shrinkage value + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `SparseApplyFtrlV2` output and operands + * @return a new instance of SparseApplyFtrl + * @see org.tensorflow.op.TrainOps.sparseApplyFtrl + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. + */ + public fun sparseApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): SparseApplyFtrl = java.sparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * Set use_nesterov = True if you want to use Nesterov momentum. 
+ * + * That is for rows we have grad for, we update var and accum as follows: + * + * $$accum = accum * momentum + grad$$ + * $$var -= lr * accum$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attribute values + * @param data type for `SparseApplyMomentum` output and operands + * @return a new instance of SparseApplyMomentum + * @see org.tensorflow.op.TrainOps.sparseApplyMomentum + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + * @return this Options instance. + */ + public fun sparseApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): SparseApplyMomentum = java.sparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + /** + * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. 
+ * That is for rows we have grad for, we update var and accum as follows: + * $$accum += grad * grad$$ + * $$prox_v = var$$ + * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ + * $$var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0}$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attribute values + * @param data type for `SparseApplyProximalAdagrad` output and operands + * @return a new instance of SparseApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.sparseApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun sparseApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + /** + * Sparse update '*var' as FOBOS algorithm with fixed learning rate. + * That is for rows we have grad for, we update var as follows: + * $$prox_v = var - alpha * grad$$ + * $$var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0}$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. 
Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attribute values + * @param data type for `SparseApplyProximalGradientDescent` output and operands + * @return a new instance of SparseApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.sparseApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. + */ + public fun sparseApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + /** + * Update '*var' according to the RMSProp algorithm. + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param data type for `out` output + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. 
+ * @param momentum The momentum value + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attribute values + * @param data type for `SparseApplyRMSProp` output and operands + * @return a new instance of SparseApplyRmsProp + * @see org.tensorflow.op.TrainOps.sparseApplyRmsProp + * @param useLocking Sets the useLocking option. + * + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @return this Options instance. + */ + public fun sparseApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyRmsProp = java.sparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + /** + * Computes the gradient function for function f via backpropagation. + * + * @param input a list of input tensors of size N + M; + * @param Tout the type list for the input list. + * @param f The function we want to compute the gradient for. + * + * The function 'f' must be a numerical function which takes N inputs and + * produces M outputs. Its gradient function 'g', which is computed by + * this SymbolicGradient op is a function taking N + M inputs and + * produces N outputs. + * + * I.e. if we have + * (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), + * then, g is + * (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, + * dL/dy1, dL/dy2, ..., dL/dy_M), + * + * where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the + * loss function). dL/dx_i is the partial derivative of L with respect + * to x_i. 
+ * + * (Needs some math expert to say the comment above better.) + * @return a new instance of SymbolicGradient + * @see org.tensorflow.op.TrainOps.symbolicGradient + */ + public fun symbolicGradient( + input: Iterable>, + Tout: List>, + f: ConcreteFunction + ): SymbolicGradient = java.symbolicGradient( + input, + Tout, + f + ) + + /** + * Returns the gradient of `Tile`. + * Since `Tile` takes an input and repeats the input `multiples` times + * along each dimension, `train.TileGrad` takes in `multiples` and aggregates + * each repeated tile of `input` into `output`. + * + * @param data type for `output` output + * @param input The input value + * @param multiples The multiples value + * @param data type for `TileGrad` output and operands + * @return a new instance of TileGrad + * @see org.tensorflow.op.TrainOps.tileGrad + */ + public fun tileGrad(input: Operand, multiples: Operand): TileGrad = + java.tileGrad( + input, + multiples + ) + + /** + * Extracts the average gradient in the given ConditionalAccumulator. + * The op blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it returns the average of + * the accumulated gradients. Also automatically increments the recorded + * global_step in the accumulator by 1, and resets the aggregate to 0. + * + * @param data type for `average` output + * @param handle The handle to an accumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. 
+ * @param data type for `AccumulatorTakeGradient` output and operands + * @return a new instance of AccumulatorTakeGradient + * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient + */ + @JvmName("accumulatorTakeGradientReified") + public inline fun accumulatorTakeGradient(handle: Operand, + numRequired: Operand): AccumulatorTakeGradient = + accumulatorTakeGradient(handle, numRequired, T::class.java) + + /** + * Multiplies slices of two tensors in batches. + * Multiplies all slices of `Tensor` `x` and `y` (each slice can be + * viewed as an element of a batch), and arranges the individual results + * in a single output tensor of the same batch size. Each of the + * individual slices can optionally be adjointed (to adjoint a matrix + * means to transpose and conjugate it) before multiplication by setting + * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + * + * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + * and `[..., r_y, c_y]`. + * + * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * ``` + * r_o = c_x if adj_x else r_x + * c_o = r_y if adj_y else c_y + * + * ``` + * + * It is computed as: + * ``` + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * + * ``` + * + * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . + * + * @param data type for `output` output + * @param x 2-D or higher with shape `[..., r_x, c_x]`. + * @param y 2-D or higher with shape `[..., r_y, c_y]`. + * @param Tout If not spcified, Tout is the same type to input type. + * @param options carries optional attribute values + * @param data type for `BatchMatMulV3` output and operands + * @return a new instance of BatchMatMul + * @see org.tensorflow.op.TrainOps.batchMatMul + * @param adjX Sets the adjX option. + * + * @param adjX If `True`, adjoint the slices of `x`. 
Defaults to `False`. + * @return this Options instance. + * @param adjY Sets the adjY option. + * + * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. + * @return this Options instance. + */ + @JvmName("batchMatMulReified") + public inline fun batchMatMul( + x: Operand, + y: Operand, + adjX: Boolean? = null, + adjY: Boolean? = null + ): BatchMatMul = batchMatMul(x, y, V::class.java, adjX, adjY) + + /** + * A conditional accumulator for aggregating gradients. + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param options carries optional attribute values + * @param data type for `ConditionalAccumulator` output and operands + * @return a new instance of ConditionalAccumulator + * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container Sets the container option. + * + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName If non-empty, this accumulator will be shared under the + * given name across multiple sessions. + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. + */ + @JvmName("conditionalAccumulatorReified") + public inline fun conditionalAccumulator( + shape: Shape, + container: String? = null, + sharedName: String? 
= null, + reductionType: String? = null + ): ConditionalAccumulator = conditionalAccumulator(T::class.java, shape, container, + sharedName, reductionType) + + /** + * Restores a tensor from checkpoint files. + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * larger tensor and the slice that the restored tensor covers. + * + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param data type for `tensor` output + * @param filePattern Must have a single element. The pattern of the files from + * which we read the tensor. + * @param tensorName Must have a single element. The name of the tensor to be + * restored. + * @param shapeAndSlice Scalar. The shapes and slice specifications to use when + * restoring a tensors. + * @param dt The type of the tensor to be restored. + * @param options carries optional attribute values + * @param data type for `RestoreSlice` output and operands + * @return a new instance of RestoreSlice + * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Sets the preferredShard option. + * + * @param preferredShard Index of file to open first if multiple files match + * `file_pattern`. See the documentation for `Restore`. + * @return this Options instance. + */ + @JvmName("restoreSliceReified") + public inline fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + preferredShard: Long? 
= null + ): RestoreSlice = restoreSlice(filePattern, tensorName, shapeAndSlice, T::class.java, + preferredShard) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt new file mode 100644 index 00000000000..1108d07e8a0 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -0,0 +1,1264 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName +import org.tensorflow.ConcreteFunction +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.xla.AllReduce +import org.tensorflow.op.xla.BroadcastHelper +import org.tensorflow.op.xla.ClusterOutput +import org.tensorflow.op.xla.Conv +import org.tensorflow.op.xla.Dequantize +import org.tensorflow.op.xla.Dot +import org.tensorflow.op.xla.DynamicSlice +import org.tensorflow.op.xla.DynamicUpdateSlice +import org.tensorflow.op.xla.Einsum +import org.tensorflow.op.xla.Gather +import org.tensorflow.op.xla.If +import org.tensorflow.op.xla.KeyValueSort +import org.tensorflow.op.xla.Pad +import org.tensorflow.op.xla.Recv +import org.tensorflow.op.xla.Reduce +import org.tensorflow.op.xla.ReduceScatter +import org.tensorflow.op.xla.ReduceWindow +import org.tensorflow.op.xla.RemoveDynamicDimensionSize +import org.tensorflow.op.xla.ReplicaId +import org.tensorflow.op.xla.RngBitGenerator +import org.tensorflow.op.xla.Scatter +import org.tensorflow.op.xla.SelectAndScatter +import org.tensorflow.op.xla.SelfAdjointEig +import org.tensorflow.op.xla.Send +import org.tensorflow.op.xla.SetDynamicDimensionSize +import org.tensorflow.op.xla.Sharding +import org.tensorflow.op.xla.Sort +import org.tensorflow.op.xla.SpmdFullToShardShape +import org.tensorflow.op.xla.SpmdShardToFullShape +import org.tensorflow.op.xla.Svd +import org.tensorflow.op.xla.While +import org.tensorflow.op.xla.XlaHostCompute +import org.tensorflow.op.xla.XlaLaunch +import org.tensorflow.op.xla.XlaRecvFromHost +import org.tensorflow.op.xla.XlaSendToHost +import org.tensorflow.op.xla.XlaSetBound +import org.tensorflow.op.xla.XlaVariadicReduce +import org.tensorflow.op.xla.XlaVariadicSort +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TNumber +import 
org.tensorflow.types.family.TType + +/** + * An API for building `xla` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class XlaOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.XlaOps = ops.java.xla + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Wraps the XLA AllReduce operator + * documented at https://www.tensorflow.org/xla/operation_semantics#allreduce. + * + * @param data type for `output` output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param reduceOp Reduction computation. + * @param data type for `XlaAllReduce` output and operands + * @return a new instance of AllReduce + * @see org.tensorflow.op.XlaOps.allReduce + */ + public fun allReduce( + input: Operand, + groupAssignment: Operand, + reduceOp: String + ): AllReduce = java.allReduce( + input, + groupAssignment, + reduceOp + ) + + /** + * Helper operator for performing XLA-style broadcasts + * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to + * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules + * for binary operators. + * + * @param data type for `lhs_output` output + * @param lhs the LHS input tensor + * @param rhs the RHS input tensor + * @param broadcastDims an XLA-style broadcast dimension specification + * @param data type for `XlaBroadcastHelper` output and operands + * @return a new instance of BroadcastHelper + * @see org.tensorflow.op.XlaOps.broadcastHelper + */ + public fun broadcastHelper( + lhs: Operand, + rhs: Operand, + broadcastDims: Operand + ): BroadcastHelper = java.broadcastHelper( + lhs, + rhs, + broadcastDims + ) + + /** + * Operator that connects the output of an XLA computation to other consumer graph nodes. 
+ * + * @param data type for `outputs` output + * @param input The input value + * @param data type for `XlaClusterOutput` output and operands + * @return a new instance of ClusterOutput + * @see org.tensorflow.op.XlaOps.clusterOutput + */ + public fun clusterOutput(input: Operand): ClusterOutput = + java.clusterOutput( + input + ) + + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . + * + * @param data type for `output` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands + * @return a new instance of Conv + * @see org.tensorflow.op.XlaOps.conv + */ + public fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String, + preferredElementType: Class + ): Conv = java.conv( + lhs, + rhs, + windowStrides, + padding, + lhsDilation, + rhsDilation, + featureGroupCount, + dimensionNumbers, + precisionConfig, + preferredElementType + ) + + /** + * Takes the packed uint32 input and unpacks the input to uint8 to do + * Dequantization on device. + * + * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. 
+ * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param mode String to determine the dequantize mode in {"MIN_COMBINED", + * "MIN_FIRST", "SCALED"}. + * @param transposeOutput Boolean to determine if output is transposed. transpose_output + * is faster when input is large and rank of input is higher than 1. + * @return a new instance of Dequantize + * @see org.tensorflow.op.XlaOps.dequantize + */ + public fun dequantize( + input: Operand, + minRange: Float, + maxRange: Float, + mode: String, + transposeOutput: Boolean + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + mode, + transposeOutput + ) + + /** + * Wraps the XLA DotGeneral operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . + * + * @param data type for `output` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaDotV2` output and operands + * @return a new instance of Dot + * @see org.tensorflow.op.XlaOps.dot + */ + public fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String, + preferredElementType: Class + ): Dot = java.dot( + lhs, + rhs, + dimensionNumbers, + precisionConfig, + preferredElementType + ) + + /** + * Wraps the XLA DynamicSlice operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice + * . + * + * DynamicSlice extracts a sub-array from the input array at dynamic + * start_indices. The size of the slice in each dimension is passed in + * size_indices, which specify the end point of exclusive slice intervals in each + * dimension -- [start, start + size). 
The shape of start_indices must have rank 1, + * with dimension size equal to the rank of operand. + * + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param startIndices List of N integers containing the slice size for each + * dimension. Each value must be strictly greater than zero, and start + size + * must be less than or equal to the size of the dimension to avoid + * implementation defined behavior. + * @param sizeIndices The sizeIndices value + * @param data type for `XlaDynamicSlice` output and operands + * @param data type for `XlaDynamicSlice` output and operands + * @return a new instance of DynamicSlice + * @see org.tensorflow.op.XlaOps.dynamicSlice + */ + public fun dynamicSlice( + input: Operand, + startIndices: Operand, + sizeIndices: Operand + ): DynamicSlice = java.dynamicSlice( + input, + startIndices, + sizeIndices + ) + + /** + * Wraps the XLA DynamicUpdateSlice operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice + * . + * + * XlaDynamicUpdateSlice generates a result which is the value of the `input` + * operand, with a slice update overwritten at `indices`. The shape of `update` + * determines the shape of the sub-array of the result which is updated. The shape + * of indices must be rank == 1, with dimension size equal to the rank of `input`. + * + * Handling of out-of-bounds slice indices is implementation-defined. + * + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param update A `Tensor` of type T. Same rank as `input`. + * @param indices A vector of indices into `input`. Must have length equal to the rank of + * `input`. 
+ * @param data type for `XlaDynamicUpdateSlice` output and operands + * @return a new instance of DynamicUpdateSlice + * @see org.tensorflow.op.XlaOps.dynamicUpdateSlice + */ + public fun dynamicUpdateSlice( + input: Operand, + update: Operand, + indices: Operand + ): DynamicUpdateSlice = java.dynamicUpdateSlice( + input, + update, + indices + ) + + /** + * An op which supports basic einsum op with 2 inputs and 1 output. + * This op has better TPU performance since it doesn't have explicitly reshape and + * transpose operations as tf.einsum does. + * + * @param data type for `product` output + * @param a The a value + * @param b The b value + * @param equation The value of the equation attribute + * @param data type for `XlaEinsum` output and operands + * @return a new instance of Einsum + * @see org.tensorflow.op.XlaOps.einsum + */ + public fun einsum( + a: Operand, + b: Operand, + equation: String + ): Einsum = java.einsum( + a, + b, + equation + ) + + /** + * Wraps the XLA Gather operator documented at + * https://www.tensorflow.org/xla/operation_semantics#gather + * + * @param data type for `output` output + * @param operand The array we're gathering from. + * @param startIndices Array containing the starting indices of the slices we gather. + * @param sliceSizes slice_sizes[i] is the bounds for the slice on dimension i. + * @param dimensionNumbers A serialized xla::GatherDimensionNumbers proto. + * @param indicesAreSorted Boolean indicating if the indices are sorted. + * @param data type for `XlaGather` output and operands + * @param data type for `XlaGather` output and operands + * @return a new instance of Gather + * @see org.tensorflow.op.XlaOps.gather + */ + public fun gather( + operand: Operand, + startIndices: Operand, + sliceSizes: Operand, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Gather = java.gather( + operand, + startIndices, + sliceSizes, + dimensionNumbers, + indicesAreSorted + ) + + /** + * output = cond ? 
then_branch(inputs) : else_branch(inputs). + * + * @param cond A boolean scalar. + * @param inputs A list of input tensors. + * @param thenBranch A function takes 'inputs' and returns a list of tensors, + * whose types are the same as what else_branch returns. + * @param elseBranch A function takes 'inputs' and returns a list of tensors. + * whose types are the same as what then_branch returns. + * @param Tout The value of the Tout attribute + * @return a new instance of If + * @see org.tensorflow.op.XlaOps.ifOp + */ + public fun ifOp( + cond: Operand, + inputs: Iterable>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + Tout: List> + ): If = java.ifOp( + cond, + inputs, + thenBranch, + elseBranch, + Tout + ) + + /** + * Wraps the XLA Sort operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts a tensor. Currently only sorts in ascending order are supported. + * + * @param data type for `sorted_keys` output + * @param data type for `sorted_values` output + * @param keys A `Tensor` of type K. + * @param values A `Tensor` of type V. + * @param data type for `XlaKeyValueSort` output and operands + * @param data type for `XlaKeyValueSort` output and operands + * @return a new instance of KeyValueSort + * @see org.tensorflow.op.XlaOps.keyValueSort + */ + public fun keyValueSort(keys: Operand, values: Operand): + KeyValueSort = java.keyValueSort( + keys, + values + ) + + /** + * Wraps the XLA Pad operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#pad + * . + * + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param paddingValue A scalar `Tensor` of type T. + * @param paddingLow the padding to apply at the start of each input dimensions. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingHigh the padding to apply at the end of each input dimension. 
Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingInterior the padding to apply between each input element. Must + * be a compile-time constant 1D tensor of length equal to rank of input, + * containing only non-negative values. + * @param data type for `XlaPad` output and operands + * @param data type for `XlaPad` output and operands + * @return a new instance of Pad + * @see org.tensorflow.op.XlaOps.pad + */ + public fun pad( + input: Operand, + paddingValue: Operand, + paddingLow: Operand, + paddingHigh: Operand, + paddingInterior: Operand + ): Pad = java.pad( + input, + paddingValue, + paddingLow, + paddingHigh, + paddingInterior + ) + + /** + * Receives the named tensor from another XLA computation. Wraps the XLA Recv + * operator documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * + * @param data type for `tensor` output + * @param dtype The type of the tensor. + * @param tensorName A string key that identifies the channel. + * @param shape The shape of the tensor. + * @param data type for `XlaRecv` output and operands + * @return a new instance of Recv + * @see org.tensorflow.op.XlaOps.recv + */ + public fun recv( + dtype: Class, + tensorName: String, + shape: Shape + ): Recv = java.recv( + dtype, + tensorName, + shape + ) + + /** + * Wraps the XLA Reduce operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#reduce . 
+ * + * @param data type for `output` output + * @param input the input tensor + * @param initValue a scalar representing the initial value for the reduction + * @param dimensionsToReduce dimension numbers over which to reduce + * @param reducer a reducer function to apply + * @param data type for `XlaReduce` output and operands + * @return a new instance of Reduce + * @see org.tensorflow.op.XlaOps.reduce + */ + public fun reduce( + input: Operand, + initValue: Operand, + dimensionsToReduce: List, + reducer: ConcreteFunction + ): Reduce = java.reduce( + input, + initValue, + dimensionsToReduce, + reducer + ) + + /** + * Wraps the XLA ReduceScatter operator + * documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter. + * + * @param data type for `output` output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param scatterDimension Dimension to scatter. + * @param reduceOp Reduction computation. + * @param data type for `XlaReduceScatter` output and operands + * @return a new instance of ReduceScatter + * @see org.tensorflow.op.XlaOps.reduceScatter + */ + public fun reduceScatter( + input: Operand, + groupAssignment: Operand, + scatterDimension: Operand, + reduceOp: String + ): ReduceScatter = java.reduceScatter( + input, + groupAssignment, + scatterDimension, + reduceOp + ) + + /** + * Wraps the XLA ReduceWindow operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . 
+ * + * @param data type for `output` output + * @param input the input tensor + * @param initValue a scalar representing the initial value for the reduction + * @param windowDimensions the shape of the window + * @param windowStrides the inter-window strides + * @param baseDilations The baseDilations value + * @param windowDilations The windowDilations value + * @param padding the padding to apply at the start and end of each input dimensions + * @param computation a reducer function to apply + * @param data type for `XlaReduceWindow` output and operands + * @param data type for `XlaReduceWindow` output and operands + * @return a new instance of ReduceWindow + * @see org.tensorflow.op.XlaOps.reduceWindow + */ + public fun reduceWindow( + input: Operand, + initValue: Operand, + windowDimensions: Operand, + windowStrides: Operand, + baseDilations: Operand, + windowDilations: Operand, + padding: Operand, + computation: ConcreteFunction + ): ReduceWindow = java.reduceWindow( + input, + initValue, + windowDimensions, + windowStrides, + baseDilations, + windowDilations, + padding, + computation + ) + + /** + * Inverse of XlaSetDynamicDimensionSize. + * Make an xla bounded dynamic dimension into a static dimension. The bound of the + * size of dimension `dim_index` becomes the static dimension size. + * + * @param data type for `output` output + * @param input The input value + * @param dimIndex The dimIndex value + * @param data type for `XlaRemoveDynamicDimensionSize` output and operands + * @return a new instance of RemoveDynamicDimensionSize + * @see org.tensorflow.op.XlaOps.removeDynamicDimensionSize + */ + public fun removeDynamicDimensionSize(input: Operand, dimIndex: Operand): + RemoveDynamicDimensionSize = java.removeDynamicDimensionSize( + input, + dimIndex + ) + + /** + * Replica ID. 
+ * + * @return a new instance of ReplicaId + * @see org.tensorflow.op.XlaOps.replicaId + */ + public fun replicaId(): ReplicaId = java.replicaId( + + ) + + /** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for `output` output + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. + * @param data type for `XlaRngBitGenerator` output and operands + * @return a new instance of RngBitGenerator + * @see org.tensorflow.op.XlaOps.rngBitGenerator + */ + public fun rngBitGenerator( + algorithm: Operand, + initialState: Operand, + shape: Operand, + dtype: Class + ): RngBitGenerator = java.rngBitGenerator( + algorithm, + initialState, + shape, + dtype + ) + + /** + * Wraps the XLA Scatter operator documented at + * https://www.tensorflow.org/xla/operation_semantics#scatter. + * + * @param data type for `output` output + * @param operand Array to be scattered into. + * @param scatterIndices Array containing the starting indices of the slices that must + * be scattered to. + * @param updates Array containing the values that must be used for scattering. + * @param updateComputation Computation to be used for combining the existing values in + * the input array and the updates during scatter. + * @param dimensionNumbers A serialized xla::ScatterDimensionNumbers proto. + * @param indicesAreSorted Boolean indicating if the indices are sorted. 
+ * @param data type for `XlaScatter` output and operands + * @return a new instance of Scatter + * @see org.tensorflow.op.XlaOps.scatter + */ + public fun scatter( + operand: Operand, + scatterIndices: Operand, + updates: Operand, + updateComputation: ConcreteFunction, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Scatter = java.scatter( + operand, + scatterIndices, + updates, + updateComputation, + dimensionNumbers, + indicesAreSorted + ) + + /** + * Wraps the XLA SelectAndScatter operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter + * . + * + * @param data type for `output` output + * @param operand the input tensor + * @param windowDimensions the shape of the window + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param source a tensor of values to scatter + * @param initValue a scalar representing the initial value for the output tensor + * @param select a selection function to apply + * @param scatter a scatter function to apply + * @param data type for `XlaSelectAndScatter` output and operands + * @param data type for `XlaSelectAndScatter` output and operands + * @return a new instance of SelectAndScatter + * @see org.tensorflow.op.XlaOps.selectAndScatter + */ + public fun selectAndScatter( + operand: Operand, + windowDimensions: Operand, + windowStrides: Operand, + padding: Operand, + source: Operand, + initValue: Operand, + select: ConcreteFunction, + scatter: ConcreteFunction + ): SelectAndScatter = java.selectAndScatter( + operand, + windowDimensions, + windowStrides, + padding, + source, + initValue, + select, + scatter + ) + + /** + * Computes the eigen decomposition of a batch of self-adjoint matrices + * (Note: Only real inputs are supported). 
+ * + * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in + * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * + * v[...,:,i], for + * i=0...N-1. + * + * @param data type for `w` output + * @param a the input tensor. + * @param lower a boolean specifies whether the calculation is done with the lower + * triangular part or the upper triangular part. + * @param maxIter maximum number of sweep update, i.e., the whole lower triangular + * part or upper triangular part based on parameter lower. Heuristically, it has + * been argued that approximately logN sweeps are needed in practice (Ref: Golub & + * van Loan "Matrix Computation"). + * @param epsilon the tolerance ratio. + * @param data type for `XlaSelfAdjointEig` output and operands + * @return a new instance of SelfAdjointEig + * @see org.tensorflow.op.XlaOps.selfAdjointEig + */ + public fun selfAdjointEig( + a: Operand, + lower: Boolean, + maxIter: Long, + epsilon: Float + ): SelfAdjointEig = java.selfAdjointEig( + a, + lower, + maxIter, + epsilon + ) + + /** + * Sends the named tensor to another XLA computation. Wraps the XLA Send operator + * documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#send . + * + * @param tensor The tensor to send. + * @param tensorName A string key that identifies the channel. + * @return a new instance of Send + * @see org.tensorflow.op.XlaOps.send + */ + public fun send(tensor: Operand, tensorName: String): Send = java.send( + tensor, + tensorName + ) + + /** + * Make a static dimension into a xla bounded dynamic dimension. + * ``` + * The current static dimension size will become the bound and the second + * operand becomes the dynamic size of the dimension. 
+ * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimIndex The dimIndex value + * @param sizeOutput The sizeOutput value + * @param data type for `XlaSetDynamicDimensionSize` output and operands + * @return a new instance of SetDynamicDimensionSize + * @see org.tensorflow.op.XlaOps.setDynamicDimensionSize + */ + public fun setDynamicDimensionSize( + input: Operand, + dimIndex: Operand, + sizeOutput: Operand + ): SetDynamicDimensionSize = java.setDynamicDimensionSize( + input, + dimIndex, + sizeOutput + ) + + /** + * An op which shards the input based on the given sharding attribute. It can + * selectively annotate a subset of tensor dimensions by skipping unspecified_dims, + * and the sharding annotation should be replicated in those dims. + * + * @param data type for `output` output + * @param input The input value + * @param options carries optional attribute values + * @param data type for `XlaSharding` output and operands + * @return a new instance of Sharding + * @see org.tensorflow.op.XlaOps.sharding + * @param sharding Sets the sharding option. + * + * @param sharding the sharding option + * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public fun sharding( + input: Operand, + sharding: String? = null, + unspecifiedDims: List? = null + ): Sharding = java.sharding( + input, + *listOfNotNull( + sharding?.let{ org.tensorflow.op.xla.Sharding.sharding(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.Sharding.unspecifiedDims(it) } + ).toTypedArray() + ) + + /** + * Wraps the XLA Sort operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts a tensor. Currently only sorts in ascending order are supported. + * + * @param data type for `output` output + * @param input A `Tensor` of type T. 
+ * @param data type for `XlaSort` output and operands + * @return a new instance of Sort + * @see org.tensorflow.op.XlaOps.sort + */ + public fun sort(input: Operand): Sort = java.sort( + input + ) + + /** + * An op used by XLA SPMD partitioner to switch from automatic partitioning to + * manual partitioning. It annotates the input (full-shape, to be automatically + * partitioned) with the same sharding used by manual partitioning, and outputs a + * shard-shaped tensor to be consumed by later manually-partitioned ops. If the + * shape is not evenly partitionable, the padding region will be masked with 0s. + * The conversion can happen partially in subgroups, by specifying the dim + * attribute, where only that dim will be converted. + * + * @param data type for `output` output + * @param input The input value + * @param manualSharding The value of the manualSharding attribute + * @param options carries optional attribute values + * @param data type for `XlaSpmdFullToShardShape` output and operands + * @return a new instance of SpmdFullToShardShape + * @see org.tensorflow.op.XlaOps.spmdFullToShardShape + * @param dim Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public fun spmdFullToShardShape( + input: Operand, + manualSharding: String, + dim: Long? = null, + unspecifiedDims: List? = null + ): SpmdFullToShardShape = java.spmdFullToShardShape( + input, + manualSharding, + *listOfNotNull( + dim?.let{ org.tensorflow.op.xla.SpmdFullToShardShape.dim(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.SpmdFullToShardShape.unspecifiedDims(it) } + ).toTypedArray() + ) + + /** + * An op used by XLA SPMD partitioner to switch from manual partitioning to + * automatic partitioning. 
It converts the shard-shaped, manually partitioned input + * into full-shaped tensor to be partitioned automatically with the same sharding + * used by manual partitioning. The conversion can happen partially in subgroups, + * by specifying the dim attribute, where only that dim will be converted. + * + * @param data type for `output` output + * @param input The input value + * @param manualSharding The value of the manualSharding attribute + * @param fullShape The value of the fullShape attribute + * @param options carries optional attribute values + * @param data type for `XlaSpmdShardToFullShape` output and operands + * @return a new instance of SpmdShardToFullShape + * @see org.tensorflow.op.XlaOps.spmdShardToFullShape + * @param dim Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public fun spmdShardToFullShape( + input: Operand, + manualSharding: String, + fullShape: Shape, + dim: Long? = null, + unspecifiedDims: List? = null + ): SpmdShardToFullShape = java.spmdShardToFullShape( + input, + manualSharding, + fullShape, + *listOfNotNull( + dim?.let{ org.tensorflow.op.xla.SpmdShardToFullShape.dim(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.SpmdShardToFullShape.unspecifiedDims(it) } + ).toTypedArray() + ) + + /** + * Computes the eigen decomposition of a batch of self-adjoint matrices + * (Note: Only real inputs are supported). + * + * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in + * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * + * Transpose(v[...,:,:]). + * + * @param data type for `s` output + * @param a the input tensor. + * @param maxIter maximum number of sweep update, i.e., the whole lower triangular + * part or upper triangular part based on parameter lower. 
Heuristically, it has + * been argued that approximately log(min (M, N)) sweeps are needed in practice + * (Ref: Golub & van Loan "Matrix Computation"). + * @param epsilon the tolerance ratio. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param data type for `XlaSvd` output and operands + * @return a new instance of Svd + * @see org.tensorflow.op.XlaOps.svd + */ + public fun svd( + a: Operand, + maxIter: Long, + epsilon: Float, + precisionConfig: String + ): Svd = java.svd( + a, + maxIter, + epsilon, + precisionConfig + ) + + /** + * output = input; While (Cond(output)) { output = Body(output) } + * + * @param input A list of input tensors whose types are T. + * @param cond A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * @param body A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified by T. + * @return a new instance of While + * @see org.tensorflow.op.XlaOps.whileOp + */ + public fun whileOp( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction + ): While = java.whileOp( + input, + cond, + body + ) + + /** + * A pseudo-op to represent host-side computation in an XLA program. + * + * @param inputs A list of tensors that will be sent to the host. + * @param Toutputs The element types of each element in `outputs`. + * @param ancestors A list of names of HostCompute computations that must be + * sequenced before this computation. + * @param shapes If shape_inference_graph is empty, a list of the shapes of `outputs`. 
+ * @param shapeInferenceGraph If non-empty, a serialized GraphDef representing a graph + * that must be analyzed at compile time to determine the shapes of the outputs. + * @param key A unique identifier for this region used to match up host transfers. + * @param options carries optional attribute values + * @return a new instance of XlaHostCompute + * @see org.tensorflow.op.XlaOps.xlaHostCompute + * @param sendKey Sets the sendKey option. + * + * @param sendKey the sendKey option + * @return this Options instance. + * @param recvKey Sets the recvKey option. + * + * @param recvKey the recvKey option + * @return this Options instance. + * @param costEstimateNs Sets the costEstimateNs option. + * + * @param costEstimateNs Estimated duration of the host computation in nanoseconds. + * @return this Options instance. + * @param tpuCore Sets the tpuCore option. + * + * @param tpuCore Default core to use for host to device transfers. + * @return this Options instance. + */ + public fun xlaHostCompute( + inputs: Iterable>, + Toutputs: List>, + ancestors: List, + shapes: List, + shapeInferenceGraph: ConcreteFunction, + key: String, + sendKey: String? = null, + recvKey: String? = null, + costEstimateNs: Long? = null, + tpuCore: Long? = null + ): XlaHostCompute = java.xlaHostCompute( + inputs, + Toutputs, + ancestors, + shapes, + shapeInferenceGraph, + key, + *listOfNotNull( + sendKey?.let{ org.tensorflow.op.xla.XlaHostCompute.sendKey(it) }, + recvKey?.let{ org.tensorflow.op.xla.XlaHostCompute.recvKey(it) }, + costEstimateNs?.let{ org.tensorflow.op.xla.XlaHostCompute.costEstimateNs(it) }, + tpuCore?.let{ org.tensorflow.op.xla.XlaHostCompute.tpuCore(it) } + ).toTypedArray() + ) + + /** + * XLA Launch Op. For use by the XLA JIT only. 
+ * + * @param constants The constants value + * @param args The args value + * @param resources The resources value + * @param Tresults The value of the Tresults attribute + * @param function The value of the function attribute + * @return a new instance of XlaLaunch + * @see org.tensorflow.op.XlaOps.xlaLaunch + */ + public fun xlaLaunch( + constants: Iterable>, + args: Iterable>, + resources: Iterable>, + Tresults: List>, + function: ConcreteFunction + ): XlaLaunch = java.xlaLaunch( + constants, + args, + resources, + Tresults, + function + ) + + /** + * An op to receive a tensor from the host. + * output: the tensor that will be received from the host. + * Toutput: element type for output. + * shape: shape for output. + * key: A unique identifier for this region used to match up host transfers. + * + * @param data type for `output` output + * @param Toutput The value of the Toutput attribute + * @param shape The value of the shape attribute + * @param key The value of the key attribute + * @param data type for `XlaRecvFromHost` output and operands + * @return a new instance of XlaRecvFromHost + * @see org.tensorflow.op.XlaOps.xlaRecvFromHost + */ + public fun xlaRecvFromHost( + Toutput: Class, + shape: Shape, + key: String + ): XlaRecvFromHost = java.xlaRecvFromHost( + Toutput, + shape, + key + ) + + /** + * An op to send a tensor to the host. + * input: the tensor that will be sent to the host. + * Tinput: element type for input. + * key: A unique identifier for this region used to match up host transfers. + * + * @param input The input value + * @param key The value of the key attribute + * @return a new instance of XlaSendToHost + * @see org.tensorflow.op.XlaOps.xlaSendToHost + */ + public fun xlaSendToHost(input: Operand, key: String): XlaSendToHost = + java.xlaSendToHost( + input, + key + ) + + /** + * Set a bound for the given input value as a hint to Xla compiler, + * ``` + * returns the same value. 
+ * + * ``` + * + * @param input The input value + * @param bound The bound value + * @return a new instance of XlaSetBound + * @see org.tensorflow.op.XlaOps.xlaSetBound + */ + public fun xlaSetBound(input: Operand, bound: Operand): XlaSetBound = + java.xlaSetBound( + input, + bound + ) + + /** + * Wraps the variadic XLA Reduce operator. + * Semantics are documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. + * + * This is an expanded version of XlaVariadicReduce, with support for + * operands of different dtypes, and improved shape inference. + * + * @param inputs the input tensor(s) + * @param initValues scalar initial value(s) for the reduction + * @param dimensionsToReduce dimension numbers over which to reduce + * @param reducer a reducer function to apply + * @return a new instance of XlaVariadicReduce + * @see org.tensorflow.op.XlaOps.xlaVariadicReduce + */ + public fun xlaVariadicReduce( + inputs: Iterable>, + initValues: Iterable>, + dimensionsToReduce: List, + reducer: ConcreteFunction + ): XlaVariadicReduce = java.xlaVariadicReduce( + inputs, + initValues, + dimensionsToReduce, + reducer + ) + + /** + * Wraps the XLA Sort operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts one or more tensors, with support for custom comparator, dimension, and + * is_stable attributes. + * + * @param inputs A list of `Tensor` of identical shape but possibly different types. + * @param dimension The dimension along which to sort. Must be a compile-time constant. + * @param comparator A comparator function to apply to 2*N scalars and returning a + * boolean. N is the number of sort inputs. If you want to sort in ascending + * order then the comparator should perform a less-than comparison. + * @param isStable Whether to use stable sort. 
+ * @return a new instance of XlaVariadicSort + * @see org.tensorflow.op.XlaOps.xlaVariadicSort + */ + public fun xlaVariadicSort( + inputs: Iterable>, + dimension: Operand, + comparator: ConcreteFunction, + isStable: Boolean + ): XlaVariadicSort = java.xlaVariadicSort( + inputs, + dimension, + comparator, + isStable + ) + + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . + * + * @param data type for `output` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands + * @return a new instance of Conv + * @see org.tensorflow.op.XlaOps.conv + */ + @JvmName("convReified") + public inline fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Conv = conv(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, + featureGroupCount, dimensionNumbers, precisionConfig, W::class.java) + + /** + * Wraps the XLA DotGeneral operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . 
+ * + * @param data type for `output` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaDotV2` output and operands + * @return a new instance of Dot + * @see org.tensorflow.op.XlaOps.dot + */ + @JvmName("dotReified") + public inline fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Dot = dot(lhs, rhs, dimensionNumbers, precisionConfig, V::class.java) + + /** + * Receives the named tensor from another XLA computation. Wraps the XLA Recv + * operator documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * + * @param data type for `tensor` output + * @param dtype The type of the tensor. + * @param tensorName A string key that identifies the channel. + * @param shape The shape of the tensor. + * @param data type for `XlaRecv` output and operands + * @return a new instance of Recv + * @see org.tensorflow.op.XlaOps.recv + */ + @JvmName("recvReified") + public inline fun recv(tensorName: String, shape: Shape): Recv = + recv(T::class.java, tensorName, shape) + + /** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for `output` output + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. 
+ * @param data type for `XlaRngBitGenerator` output and operands + * @return a new instance of RngBitGenerator + * @see org.tensorflow.op.XlaOps.rngBitGenerator + */ + @JvmName("rngBitGeneratorReified") + public inline fun rngBitGenerator( + algorithm: Operand, + initialState: Operand, + shape: Operand + ): RngBitGenerator = rngBitGenerator(algorithm, initialState, shape, U::class.java) + + /** + * An op to receive a tensor from the host. + * output: the tensor that will be received from the host. + * Toutput: element type for output. + * shape: shape for output. + * key: A unique identifier for this region used to match up host transfers. + * + * @param data type for `output` output + * @param Toutput The value of the Toutput attribute + * @param shape The value of the shape attribute + * @param key The value of the key attribute + * @param data type for `XlaRecvFromHost` output and operands + * @return a new instance of XlaRecvFromHost + * @see org.tensorflow.op.XlaOps.xlaRecvFromHost + */ + @JvmName("xlaRecvFromHostReified") + public inline fun xlaRecvFromHost(shape: Shape, key: String): + XlaRecvFromHost = xlaRecvFromHost(T::class.java, shape, key) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt new file mode 100644 index 00000000000..d9c5c2bfb78 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -0,0 +1,124 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow + +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.tf + +/** + * Create a [ConcreteFunction] by building a new graph. + * @see ConcreteFunction.create + */ +public inline fun ConcreteFunction( + crossinline function: KotlinOps.() -> Signature +): ConcreteFunction { + contract { callsInPlace(function, InvocationKind.EXACTLY_ONCE) } + return ConcreteFunction.create { function(it.tf) } +} + +/** + * Call this function with the specified arguments. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke(arguments: Map): Map = + this.call(arguments) + +/** + * Call this function with the specified arguments. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke( + vararg arguments: Pair +): Map = this.invoke(arguments.toMap()) + +/** + * Call this function with a single argument. Requires this function to be a single argument + * function. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke(argument: Tensor): Tensor = this.call(argument) + +/** Create a [Signature] for a [ConcreteFunction]. */ +public fun Signature( + methodName: String, + inputs: Map>, + outputs: Map>, + key: String = Signature.DEFAULT_KEY, +): Signature = + Signature.builder().methodName(methodName).key(key).inputs(inputs).outputs(outputs).build() + +/** Create a [Signature] for a [ConcreteFunction]. 
*/ +public fun Signature( + methodName: String, + inputs: Operand<*>, + outputs: Map>, + key: String = Signature.DEFAULT_KEY, +): Signature = + Signature.builder() + .methodName(methodName) + .key(key) + .input("input", inputs) + .outputs(outputs) + .build() + +/** Create a [Signature] for a [ConcreteFunction]. */ +public fun Signature( + methodName: String, + inputs: Map>, + outputs: Operand<*>, + key: String = Signature.DEFAULT_KEY, +): Signature = + Signature.builder() + .methodName(methodName) + .key(key) + .inputs(inputs) + .output("output", outputs) + .build() + +/** Create a [Signature] for a [ConcreteFunction]. */ +public fun Signature( + methodName: String, + inputs: Operand<*>, + outputs: Operand<*>, + key: String = Signature.DEFAULT_KEY, +): Signature = + Signature.builder() + .methodName(methodName) + .key(key) + .input("input", inputs) + .output("output", outputs) + .build() + +/** Add [inputs] to the signature. */ +public fun Signature.Builder.inputs(inputs: Map>): Signature.Builder = apply { + inputs.forEach { input(it.key, it.value) } +} + +/** Add [outputs] to the signature. */ +public fun Signature.Builder.outputs(outputs: Map>): Signature.Builder = apply { + outputs.forEach { output(it.key, it.value) } +} + +/** Add [inputs] to the signature. */ +public fun Signature.Builder.inputs(vararg inputs: Pair>): Signature.Builder = + inputs(inputs.toMap()) + +/** Add [outputs] to the signature. 
*/ +public fun Signature.Builder.outputs(vararg outputs: Pair>): Signature.Builder = + outputs(outputs.toMap()) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt new file mode 100644 index 00000000000..49216fd79b5 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -0,0 +1,119 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow + +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract +import org.tensorflow.EagerSession.DevicePlacementPolicy +import org.tensorflow.proto.framework.ConfigProto + +/** Construct a TensorFlow [Graph] and run [block] on it. */ +public inline fun Graph(block: Graph.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return Graph().use { it.run(block) } +} + +/** + * Construct a new session with the associated {@link Graph} and configuration options, and run + * [block] on it. Closes the session afterwards. + * + * @param g The {@link Graph} the created Session will operate on. 
+ * @param config Configuration parameters for the session specified as a
+ * [ConfigProto](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
+ * protocol buffer.
+ * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto
+ * protocol buffer.
+ */
+public inline fun Graph.useSession(config: ConfigProto? = null, block: (Session) -> R): R {
+  contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) }
+  return Session(this, config).use(block)
+}
+
+/**
+ * An environment for executing TensorFlow operations eagerly.
+ *
+ * Eager execution is an imperative programming environment that evaluates operations immediately,
+ * without building graphs. Operations return concrete values instead of constructing a
+ * computational graph to run later, as with {@link Graph}s and {@link Session}s.
+ *
+ * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a
+ * standard programming library.
+ *
+ * Instances of a {@code EagerSession} are thread-safe.
+ *
+ * @param options The options for this session.
+ * @see EagerSession.Options
+ */
+public inline fun EagerSession(
+    options: EagerSession.Options? = null,
+    block: EagerSession.() -> R,
+): R {
+  contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) }
+
+  val ses = options?.build() ?: EagerSession.create()
+  return ses.use(block)
+}
+
+/**
+ * An environment for executing TensorFlow operations eagerly.
+ *
+ * Eager execution is an imperative programming environment that evaluates operations immediately,
+ * without building graphs. Operations return concrete values instead of constructing a
+ * computational graph to run later, as with {@link Graph}s and {@link Session}s.
+ *
+ * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a
+ * standard programming library.
+ *
+ * Instances of a {@code EagerSession} are thread-safe.
+ * + * @param config The session configuration to use. See [EagerSession.Options.config] and + * [ConfigProto]. + * @param async Whether to return from op methods before the outputs have been calculated. See + * [EagerSession.Options.async]. + * @param devicePlacementPolicy How to handle tensors on different devices. See + * [EagerSession.Options.devicePlacementPolicy]. + * @see EagerSession.Options + */ +public inline fun EagerSession( + config: ConfigProto? = null, + async: Boolean = false, + devicePlacementPolicy: DevicePlacementPolicy = DevicePlacementPolicy.SILENT, + block: EagerSession.() -> R, +): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + + val options = + EagerSession.options() + .config(config) + .async(async) + .devicePlacementPolicy(devicePlacementPolicy) + + return EagerSession(options, block) +} + +/** + * Executed [block] in the default eager session, creating it if necessary. + * + * To configure the default session, use [EagerSession.initDefault]. + */ +public fun withDefaultEagerSession(block: EagerSession.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return EagerSession.getDefault().use(block) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt new file mode 100644 index 00000000000..79992bed2b1 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -0,0 +1,107 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow + +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.Shaped + +/** + * The (possibly partially known) shape of the tensor referred to by the {@link Output} of this + * operand. + * @see Operand.shape + */ +public val Operand<*>.shape: Shape + get() = this.shape() + +public fun interface ShapeErrorLazyMessage { + public fun message(actual: Shape, required: Shape): String +} + +@PublishedApi +internal val defaultShapeErrorMessage: ShapeErrorLazyMessage = + ShapeErrorLazyMessage { actual, required -> + "Shape $actual is not compatible with the required shape $required" +} + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws AssertionError if the shapes are not compatible + */ +public inline fun T.assertShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + assert(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } +} + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws AssertionError if the shapes are not compatible + */ +public inline fun T.assertShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) + +/** + * Require the [Shaped] object have a certain shape. 
+ * + * @throws IllegalArgumentException if the shapes are not compatible + */ +public inline fun T.requireShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + require(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } +} + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalArgumentException if the shapes are not compatible + */ +public inline fun T.requireShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalStateException if the shapes are not compatible + */ +public inline fun T.checkShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + check(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } +} + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalStateException if the shapes are not compatible + */ +public inline fun T.checkShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt new file mode 100644 index 00000000000..b9660905533 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt @@ -0,0 +1,54 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow.op + +import kotlin.reflect.KClass +import org.tensorflow.internal.types.registry.TensorTypeRegistry +import org.tensorflow.proto.framework.DataType +import org.tensorflow.types.family.TType + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public fun Class.dataType(): DataType = Operands.toDataType(this) + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public fun KClass.dataType(): DataType = Operands.toDataType(this.java) + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public inline fun dataType(): DataType = T::class.dataType() + +/** + * Converts a [DataType] attribute to a tensor type class. + * + * @return the tensor type class + * @see TensorTypeRegistry.find + */ +public fun DataType.tType(): Class = TensorTypeRegistry.find(this).type() diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt new file mode 100644 index 00000000000..c1e14504f05 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt @@ -0,0 +1,18 @@ +// ktlint-disable filename +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow.op + +public typealias JavaOps = Ops diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt new file mode 100644 index 00000000000..c8b296fdc31 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -0,0 +1,354 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+============================================================================== +*/ +package org.tensorflow.op.kotlin + +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract +import org.tensorflow.DeviceSpec +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.index.Index +import org.tensorflow.op.Op +import org.tensorflow.op.Ops +import org.tensorflow.op.WithOps +import org.tensorflow.op.core.Constant +import org.tensorflow.op.core.StopGradient +import org.tensorflow.op.core.StridedSlice +import org.tensorflow.op.dtypes.Cast +import org.tensorflow.op.linalg.MatMul +import org.tensorflow.op.math.Add +import org.tensorflow.op.math.Div +import org.tensorflow.op.math.Equal +import org.tensorflow.op.math.Greater +import org.tensorflow.op.math.GreaterEqual +import org.tensorflow.op.math.Less +import org.tensorflow.op.math.LessEqual +import org.tensorflow.op.math.LogicalAnd +import org.tensorflow.op.math.LogicalNot +import org.tensorflow.op.math.LogicalOr +import org.tensorflow.op.math.Mod +import org.tensorflow.op.math.Mul +import org.tensorflow.op.math.Neg +import org.tensorflow.op.math.NotEqual +import org.tensorflow.op.math.Pow +import org.tensorflow.op.math.Sub +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TUint8 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * Interface extended by [KotlinOps], used for now to declare extensions on Operand + * + * FIXME: Should be replaced by multiple receivers when available + */ +public abstract class OpsBase : WithOps { + + public abstract val java: Ops + + override fun tf(): Ops { + return java + } + + override fun withSubScope(childScopeName: String): KotlinOps = + java.withSubScope(childScopeName).tf + + /** + * Runs [block] on a child [KotlinOps] 
builder that builds operations with the provided name + * prefix. + * + * @see org.tensorflow.op.Scope.withSubScope + */ + // TODO should be a decorator too, when possible, and the same for the rest of the with methods + public inline fun withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withSubScope(childScopeName).run(block) + } + + override fun withName(opName: String): KotlinOps = java.withName(opName).tf + + override fun withDevice(deviceSpec: DeviceSpec): KotlinOps = java.withDevice(deviceSpec).tf + + /** + * Runs [block] on a child [KotlinOps] builder that uses the provided device for created ops. + * + * @see org.tensorflow.op.Scope.withDevice + */ + public inline fun withDevice(device: DeviceSpec, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withDevice(device).run(block) + } + + override fun withControlDependencies(controls: Iterable): KotlinOps = + java.withControlDependencies(controls).tf + + /** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. + * + * @see org.tensorflow.op.Scope.withControlDependencies + */ + public inline fun withControlDependencies( + controls: Iterable, + block: KotlinOps.() -> R + ): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(controls).run(block) + } + + override fun withControlDependencies(vararg controls: Op): KotlinOps = + java.withControlDependencies(listOf(*controls)).tf + + /** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. 
+ * + * @see org.tensorflow.op.Scope.withControlDependencies + */ + public inline fun withControlDependencies(vararg controls: Op, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(*controls).run(block) + } + + /** + * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and + * [withDevice]. Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ + public fun withSubScope( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + ): KotlinOps { + var ops = java + childScopeName?.let { ops = ops.withSubScope(it) } + controlDependencies?.let { ops = ops.withControlDependencies(it) } + device?.let { ops = ops.withDevice(it) } + return ops.tf + } + + /** + * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], + * [withControlDependencies], and [withDevice]. Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ + public inline fun withSubScope( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + block: KotlinOps.() -> R, + ): R { + return withSubScope(childScopeName, controlDependencies, device).run(block) + } + + // TODO all of these should be context functions on WithOps. + + /** @see LinalgOps.matMul */ + public fun Operand.matMul( + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? 
= null, + ): MatMul = tf.linalg.matMul(this, b, transposeA, transposeB) + + /** @see LinalgOps.matMul */ + public infix fun Operand.matMul(b: Operand): MatMul = + matMul(b, transposeB = null) + + /** @see MathOps.add */ + public operator fun Operand.plus(b: Operand): Add = tf.math.add(this, b) + + /** @see MathOps.sub */ + public operator fun Operand.minus(b: Operand): Sub = tf.math.sub(this, b) + + /** @see MathOps.mul */ + public operator fun Operand.times(b: Operand): Mul = tf.math.mul(this, b) + + /** @see MathOps.div */ + public operator fun Operand.div(b: Operand): Div = tf.math.div(this, b) + + /** @see MathOps.mod */ + public operator fun Operand.rem(b: Operand): Mod = tf.math.mod(this, b) + + /** @see MathOps.pow */ + public infix fun Operand.pow(b: Operand): Pow = tf.math.pow(this, b) + + /** @see MathOps.add */ + public operator fun Operand.plus(scalar: Number): Add = + this + tf.constantOfSameType(this, scalar) + + /** @see MathOps.sub */ + public operator fun Operand.minus(scalar: Number): Sub = + this - tf.constantOfSameType(this, scalar) + + /** @see MathOps.mul */ + public operator fun Operand.times(scalar: Number): Mul = + this * tf.constantOfSameType(this, scalar) + + /** @see MathOps.div */ + public operator fun Operand.div(scalar: Number): Div = + this / tf.constantOfSameType(this, scalar) + + /** @see MathOps.mod */ + public operator fun Operand.rem(scalar: Number): Mod = + this % tf.constantOfSameType(this, scalar) + + /** @see MathOps.pow */ + public infix fun Operand.pow(scalar: Number): Pow = + this pow tf.constantOfSameType(this, scalar) + + /** @see MathOps.neg */ + public operator fun Operand.unaryMinus(): Neg = tf.math.neg(this) + + /** @see MathOps.logicalNot */ + public operator fun Operand.not(): LogicalNot = tf.math.logicalNot(this) + + /** @see MathOps.logicalAnd */ + public infix fun Operand.and(b: Operand): LogicalAnd = tf.math.logicalAnd(this, b) + + /** @see MathOps.logicalOr */ + public infix fun Operand.or(b: Operand): 
LogicalOr = tf.math.logicalOr(this, b) + + /** @see MathOps.equal */ + public infix fun Operand.eq(b: Operand): Equal = tf.math.equal(this, b) + + /** @see MathOps.notEqual */ + public infix fun Operand.neq(b: Operand): NotEqual = tf.math.notEqual(this, b) + + /** @see MathOps.less */ + public infix fun Operand.lt(b: Operand): Less = tf.math.less(this, b) + + /** @see MathOps.greater */ + public infix fun Operand.gt(b: Operand): Greater = tf.math.greater(this, b) + + /** @see MathOps.lessEqual */ + public infix fun Operand.lte(b: Operand): LessEqual = + tf.math.lessEqual(this, b) + + /** @see MathOps.greaterEqual */ + public infix fun Operand.gte(b: Operand): GreaterEqual = + tf.math.greaterEqual(this, b) + + /** @see KotlinOps.stopGradient */ + @JvmName("stopGradientExtension") + public fun Operand.stopGradient(): StopGradient = tf.stopGradient(this) + + /** @see DtypesOps.cast */ + public inline fun Operand<*>.cast(truncate: Boolean? = null): Cast = + tf.dtypes.cast(this, truncate) + + /** @see KotlinOps.constant */ + public fun Int.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Long.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Float.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Double.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Byte.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Boolean.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun IntArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun LongArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun FloatArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun DoubleArray.asConstant(): Constant = tf.constant(this) + + /** @see 
KotlinOps.constant */ + public fun ByteArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun BooleanArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Shape.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("intsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("longsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("floatsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("doublesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("bytesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. 
+ * + * @see KotlinOps.constant + */ + @JvmName("booleansAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + // TODO look at syntax `W[1][3..4]()` + /** @see KotlinOps.stridedSlice */ + public operator fun Operand.get(vararg indices: Index): StridedSlice = + tf.stridedSlice(this, *indices) +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt new file mode 100644 index 00000000000..4cf840259ac --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -0,0 +1,85 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow.op.kotlin + +import org.tensorflow.op.WithOps +import org.tensorflow.op.core.Constant +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TUint8 + +/** Get the Kotlin ops builder. */ +public val WithOps.tf: KotlinOps + get() = if (this is KotlinOps) this else KotlinOps(tf()) + +/** Get the Kotlin ops builder. 
*/ +public val KotlinOps.tf: KotlinOps + get() = this + +// TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be +// too error prone to be worth doing + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantDoubles") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toDoubleArray()) + +/** @see KotlinOps.constant */ +@JvmName("constantFloats") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toFloatArray()) + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantInts") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toIntArray()) + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantLongs") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toLongArray()) + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantBytes") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toByteArray()) + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantBooleans") +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toBooleanArray()) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt new file mode 100644 index 00000000000..94e78047e56 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -0,0 +1,60 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow + +import kotlin.test.Test +import kotlin.test.assertEquals +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.WithOps +import org.tensorflow.op.kotlin.tf +import org.tensorflow.types.TFloat32 + +private fun WithOps.DenseLayer( + name: String, + x: Operand, + n: Int, + activation: WithOps.(Operand) -> Operand = { tf.nn.relu(it) }, +): Operand = + tf.withSubScope(name) { + // TODO should be dynamic + val inputDims = x.shape()[1] + val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) + val b = tf.variable(tf.ones(tf.array(n))) + activation((x matMul W) + b) + } + +public class ExampleTest { + @Test + public fun mnistExample() { + Graph { + val input = + tf.placeholderWithDefault( + tf.ones(tf.array(1, 28, 28, 3)), Shape.of(-1, 28, 28, 3)) + + var x: Operand = tf.reshape(input, tf.array(-1, 28 * 28 * 3)) + x = DenseLayer("Layer1", x, 256) + x = DenseLayer("Layer2", x, 64) + val output = DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(it) } + + useSession { session -> + val outputValue = session.runner().fetch(output).run()[0] as TFloat32 + assertEquals(Shape.of(1, 10), outputValue.shape()) + assertEquals(1.0f, outputValue.getFloat(0, 0)) + } + } + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml new file mode 100644 index 00000000000..57a21dfe735 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -0,0 +1,123 @@ + + + + 
4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.5.0-SNAPSHOT + + tensorflow-framework-kotlin + jar + + TensorFlow Framework Kotlin Library + Kotlin API wrappers for the TensorFlow Framework Java library + + + + + + + + org.tensorflow + tensorflow-framework + ${project.version} + + + org.tensorflow + tensorflow-core-kotlin + ${project.version} + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.openjdk.jmh + jmh-core + test + + + org.openjdk.jmh + jmh-generator-annprocess + test + + + org.jetbrains.kotlin + kotlin-test-junit5 + ${kotlin.version} + test + + + + org.tensorflow + tensorflow-core-platform${javacpp.platform.extension} + ${project.version} + test + + + + + ${project.basedir}/src/main/kotlin + ${project.basedir}/src/test/kotlin + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.2 + + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt new file mode 100644 index 00000000000..f97b55f95d0 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt @@ -0,0 +1,32 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.framework.activations + +import org.tensorflow.Operand +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.tf +import org.tensorflow.types.family.TNumber + +/** + * Create an initializer. + * @see org.tensorflow.framework.activations.Activation + */ +public inline fun Activation( + crossinline activation: KotlinOps.(Operand) -> Operand +): Activation = + org.tensorflow.framework.activations.Activation { tf, input -> activation(tf.tf, input) } diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt new file mode 100644 index 00000000000..cb697892bde --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt @@ -0,0 +1,42 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.framework.initializers + +import org.tensorflow.Operand +import org.tensorflow.op.Ops +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.tf +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TType + +/** + * Create an initializer + * @see org.tensorflow.framework.initializers.Initializer + */ +public inline fun Initializer( + crossinline initializer: KotlinOps.(dims: Operand, dataType: Class) -> Operand +): Initializer = + org.tensorflow.framework.initializers.Initializer { tf, dims, dataType -> + initializer(tf.tf, dims, dataType) + } + +/** Call an initializer. */ +public inline fun Initializer.call( + tf: Ops, + dims: Operand +): Operand = call(tf, dims, T::class.java)!! diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml new file mode 100644 index 00000000000..9ab76d6db32 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml @@ -0,0 +1,51 @@ + + + + 4.0.0 + + org.tensorflow + tensorflow-kotlin-parent + 0.5.0-SNAPSHOT + + tensorflow-kotlin-generator + jar + + TensorFlow Kotlin Annotation Processor + Annotation processor for the TensorFlow Kotlin API + + + + org.tensorflow + tensorflow-core-generator + ${project.version} + + + com.squareup + kotlinpoet + 1.7.2 + + + + + ${project.basedir}/src/main/kotlin + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt new file mode 100644 index 00000000000..39ef3dac9a6 --- /dev/null +++ 
b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt @@ -0,0 +1,146 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ +package org.tensorflow.processor.operator + +import com.github.javaparser.javadoc.Javadoc +import com.github.javaparser.javadoc.JavadocBlockTag +import com.github.javaparser.javadoc.description.JavadocDescription +import com.github.javaparser.javadoc.description.JavadocDescriptionElement +import com.github.javaparser.javadoc.description.JavadocInlineTag + +private fun JavadocDescription.preParseTransform(): JavadocDescription { + val transformedElements = + elements.map { + if (it is JavadocInlineTag && it.type == JavadocInlineTag.Type.CODE) it.toText() + else + it.toText() + .replace("\r\n", "\n") + .replace("
", "{@code ")
+                .replace("
", "}") + .replace(Regex("\n?
\\s*
\\s*
\n"), "{@code ") + .replace(Regex("\n?\\s*
\\s*
\\s*
"), "}") + } + return JavadocDescription.parseText(transformedElements.joinToString("").trimIndent()) +} + +internal fun Javadoc.toKDoc(): String = buildString { + append(description.toKDoc()) + appendLine() + appendLine() + this@toKDoc.blockTags.mapNotNull { it.toKDoc() }.forEach { append(it + "\n") } +} + +private inline fun JavadocBlockTag.directToKDoc(mapContent: (String) -> String = { it }) = + buildString { + append("@") + append(this@directToKDoc.tagName) + append(" ") + this@directToKDoc.name.ifPresent { append("$it ") } + append(this@directToKDoc.content.toKDoc().let(mapContent)) +} + +private fun JavadocBlockTag.toKDoc(): String = + when (type) { + JavadocBlockTag.Type.DEPRECATED -> "" + JavadocBlockTag.Type.SEE -> + directToKDoc { convertRef(it) } // TODO or does this parse as link? + JavadocBlockTag.Type.SERIAL -> "Serial: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_DATA -> "Serial Data: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_FIELD -> "Serial Field: ${content.toKDoc()}" + JavadocBlockTag.Type.SINCE -> "Since Java ${content.toKDoc()}" + JavadocBlockTag.Type.VERSION -> "Version: ${content.toKDoc()}" + JavadocBlockTag.Type.UNKNOWN -> + buildString { + append(this@toKDoc.tagName) + append(": ") + this@toKDoc.name.ifPresent { append("$it ") } + append(this@toKDoc.content.toKDoc()) + } + else -> directToKDoc() + }.replace("```", "`") + +private fun String.replaceTag(with: String, vararg tags: String) = + tags.fold(this) { current, tag -> current.replace("<$tag>", with).replace("", with) } + +// TODO get rid of once KT-46290 is fixed +private fun String.replaceProblematicBrackets() = + replace(Regex("\\[([^\\]]*.[^\\]*])\\]")) { "[${it.groupValues[1]}]" } + +private fun JavadocDescription.toKDoc(): String { + if (this.isEmpty) return "" + return preParseTransform() + .elements + .joinToString("") { it.toKDoc() } + .replace("\r\n", "\n") + .replace("<", "<") + .replace(">", ">") + .replaceTag("\n", "p", "br") + .replaceTag("_", "em", "i") + 
.replaceTag("**", "strong", "b") + .replaceTag("~~", "strike", "del", "s") + .replace("
", "") + .replace("
", "") + .replace("\\(", "`\\(") + .replace("\\)", "\\)`") + .replace(Regex("\n\\s*\n", "") + .replace(Regex("]+)\">([^<]*)")) { + "[${it.groupValues[2]}](${it.groupValues[1]})" + } +} + +private fun JavadocDescriptionElement.toKDoc(): String = + if (this is JavadocInlineTag) this.toKDoc() else this.toText().replaceProblematicBrackets() + +private fun convertRef(ref: String) = ref.substringBefore('(').replace("#", ".") + +private fun convertLink(link: String): String = + if (" " in link) { + val (link, label) = link.split(' ') + "[$label][${convertRef(link)}]" + } else { + "[${convertRef(link)}]" + } + +private val JavadocInlineTag.trimmedContent + get() = content.trimStart() + +private fun makeCodeBlock(content: String): String { + val stripedContent = + if (content.startsWith("{@code ")) content.removePrefix("{@code ").removeSuffix("}") + else content + + val isMultiline = stripedContent.lines().size > 1 + + val escapedContent = + if (isMultiline) stripedContent else stripedContent.replaceProblematicBrackets() + + return if (isMultiline) "```\n$escapedContent\n```" else "`$escapedContent`" +} + +internal fun JavadocInlineTag.toKDoc(): String = + when (type) { + JavadocInlineTag.Type.CODE -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.DOC_ROOT -> trimmedContent + JavadocInlineTag.Type.INHERIT_DOC -> trimmedContent + JavadocInlineTag.Type.LINK -> convertLink(trimmedContent) + JavadocInlineTag.Type.LINKPLAIN -> convertLink(trimmedContent) + JavadocInlineTag.Type.LITERAL -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.VALUE -> convertLink(trimmedContent) + JavadocInlineTag.Type.SYSTEM_PROPERTY -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.UNKNOWN -> trimmedContent + } diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt new file 
mode 100644 index 00000000000..fee4f8c5e2b --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -0,0 +1,444 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow.processor.operator + +import com.squareup.javapoet.ClassName as JavaClassName +import com.squareup.kotlinpoet.* +import com.squareup.kotlinpoet.ParameterizedTypeName.Companion.parameterizedBy +import java.io.File +import java.io.IOException +import javax.annotation.processing.ProcessingEnvironment +import javax.lang.model.element.TypeElement +import javax.lang.model.type.ArrayType +import javax.lang.model.util.ElementFilter +import org.tensorflow.Names + +val JavaClassName.kotlin + get() = ClassName(this.packageName(), this.simpleNames()) + +class KotlinOpsProcessor : BaseOperatorProcessor() { + private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") + private val T_KOTLIN_OPS_BASE = ClassName("org.tensorflow.op.kotlin", "OpsBase") + private val PACKAGE = "org.tensorflow.op.kotlin" + private val T_OPERAND = Names.Operand.kotlin + private val T_CLASS = ClassName("java.lang", "Class") + private val T_JAVA_LIST = ClassName("java.util", "List") + + private lateinit var sourceDir: File + + @Synchronized + override fun init(processingEnv: 
ProcessingEnvironment) { + super.init(processingEnv) + val kotlinDir = + File( + processingEnv.options["kapt.kotlin.generated"] + ?: error("Kotlin source dir not specified")) + val projectDir = kotlinDir.parentFile.parentFile.parentFile.parentFile + require(projectDir.name == "tensorflow-core-kotlin") { + "Could not find project directory. Found $projectDir" + } + sourceDir = File(projectDir, "src/gen/annotations") + sourceDir.mkdirs() + } + + override fun write(spec: TypeSpec) { + try { + val text = + buildString { + FileSpec.builder(PACKAGE, spec.name ?: error("Type spec has no name")) + .indent(" ") + .addComment(LICENSE) + .addComment("\nThis class has been generated, DO NOT EDIT!\n") + .addType(spec) + .build() + .writeTo(this) + } + .replace("import java.(lang|util).[\\w.*]+\r?\n".toRegex(), "") + .replace("java.lang.", "") + .replace("java.util.List", "List") + .replace("\t", " ") + + val packageFile = File(sourceDir, PACKAGE.replace(".", "/")) + packageFile.mkdirs() + + File(packageFile, spec.name!! + ".kt").writeText(text) + } catch (e: IOException) { + throw AssertionError(e) + } + } + + private val OpsSpec.parents: List + get() = this.parent?.let { listOf(it) + it.parents }.orEmpty() + + /** @see adjustType */ + private fun adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { + if (type == T_OPERAND) return T_OPERAND.parameterizedBy(STAR) + + if (type is ParameterizedTypeName && !isVararg) { + if (type.rawType == ARRAY) { + when (type.typeArguments.single()) { + BOOLEAN -> return BOOLEAN_ARRAY + BYTE -> return BYTE_ARRAY + SHORT -> return SHORT_ARRAY + INT -> return INT_ARRAY + LONG -> return LONG_ARRAY + CHAR -> return CHAR_ARRAY + FLOAT -> return FLOAT_ARRAY + DOUBLE -> return DOUBLE_ARRAY + else -> {} + } + } + } + + // may not be corrected sometimes. 
Can't compare to classes b/c + // java.lang.Boolean::class.asTypeName() is converted to kotlin.Boolean + when (type.toString().removeSuffix("?").removeSuffix("!")) { + "java.lang.Boolean" -> return BOOLEAN.copy(nullable = type.isNullable) + "java.lang.Byte " -> return BYTE.copy(nullable = type.isNullable) + "java.lang.Short" -> return SHORT.copy(nullable = type.isNullable) + "java.lang.Integer" -> return INT.copy(nullable = type.isNullable) + "java.lang.Long" -> return LONG.copy(nullable = type.isNullable) + "java.lang.Character" -> return CHAR.copy(nullable = type.isNullable) + "java.lang.Float" -> return FLOAT.copy(nullable = type.isNullable) + "java.lang.Double" -> return DOUBLE.copy(nullable = type.isNullable) + "java.lang.String" -> return STRING.copy(nullable = type.isNullable) + else -> {} + } + + return type + } + + /** + * Adjust types to their Kotlin counterparts. Currently only changes Operand to Operand<*> and + * primitive arrays to their Kotlin counterparts. Changes should be made to [adjustSingleType], + * this is a helper for parameterized types. 
+ */ + private fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { + val adjusted = adjustSingleType(type, isVararg) + if (adjusted is ParameterizedTypeName) { + val newArgs = adjusted.typeArguments.map { adjustType(it) } + return adjusted.rawType.parameterizedBy(newArgs) + } + return adjusted + } + + private fun List.toKotlin(javaOpsClass: ClassName): List { + val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() + methods += methods.mapNotNull { makeCopyWithReified(it) } + + val duplicates = + methods + .filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } } + .mapNotNull { orig -> + val others = + methods.minus(orig).filter { + it.name == orig.name && + it.parameters.map { it.name to it.type } == + orig.parameters.map { it.name to it.type } + } + if (others.isEmpty()) { + null + } else { + setOf(orig) + others + } + } + .toSet() + + duplicates.forEach { + val original = + it.single { it.annotations.none { it.typeName == JvmName::class.asTypeName() } } + var i = 0 + it.minus(original).forEach { + val idx = methods.indexOf(it) + methods[idx] = it.toBuilder(it.name + "Typed" + if (i == 0) "" else "$i").build() + i++ + } + } + return methods + } + + private fun OpMethod.toKotlin(javaOpsClass: ClassName): FunSpec { + val builder = FunSpec.builder(name).returns(adjustType(endpointMethod.returnType.asTypeName())) + + if (deprecated) + builder.addAnnotation( + AnnotationSpec.builder(Deprecated::class) + .addMember("message = Op is Deprecated") + .build()) + + val typeParameters = + endpointMethod.typeParameters.map { it.asTypeVariableName() }.toMutableList() + + val parameters = + endpointMethod.parameters + .filter { com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE } + .map { ParameterSpec.get(it) } + + val optionsParameter = + parameters.singleOrNull { + if (endpointMethod.isVarArgs && "Array<" in it.type.toString()) + ((it.type as? ParameterizedTypeName)?.typeArguments?.singleOrNull() as? 
ClassName) + ?.simpleName == "Options" + else false + } + + builder.addTypeVariables(typeParameters) + + val typeParamNames = typeParameters.map { it.name }.toSet() + + builder.addParameters( + parameters.filter { it != optionsParameter }.map { + var param = it + if (param.name in typeParamNames) param = param.toBuilder(param.name + "_").build() + + if (endpointMethod.isVarArgs && "Array<" in param.type.toString()) + param = + param + .toBuilder( + type = (param.type as ParameterizedTypeName).typeArguments.single()) + .addModifiers(KModifier.VARARG) + .build() + + param + .toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)) + .build() + }) + + val optionsClass = + if (optionsParameter != null) { + val paramElement = + endpointMethod.parameters.single { + it.simpleName.contentEquals(optionsParameter.name) + } + val type = paramElement.asType()?.let { if (it is ArrayType) it.componentType else it } + types.asElement(type) as TypeElement + } else null + + val opClassSpec = (optionsClass?.enclosingElement as TypeElement?)?.asClassName() + + val optionParams = + if (optionsClass != null) { + val params = + ElementFilter.methodsIn(optionsClass.enclosedElements) + .map { + ParameterSpec.builder( + it.simpleName.toString(), + adjustType(it.parameters.single().asType().asTypeName()) + .copy(nullable = true)) + .addKdoc( + "%L", + parseJavadoc(it).toKDoc().removePrefix("@param ${it.simpleName} ")) + .defaultValue("null") + .build() + } + .toMutableList() + + // ensure vararg options are the ones that get removed + params.toList().forEach { param -> + val type = param.type + if (type is ParameterizedTypeName && type.rawType == T_JAVA_LIST) { + params.removeAll { it.name == param.name && it != param } + } + } + + params.distinctBy { it.name }.toSet() + } else emptySet() + + if (optionParams.isNotEmpty()) builder.addParameters(optionParams) + + builder.addStatement( + buildString { + append("return java.$name") + if (typeParamNames.isNotEmpty()) 
append("<${typeParamNames.joinToString(", ")}>") + + append("(") + + val paramStrings = + builder + .parameters + .filter { it !in optionParams } + .map { + val name = if (it.name == "var") "`var`" else it.name + + if (KModifier.VARARG in it.modifiers) "*${name}" else name + } + .plus( + if (optionParams.isNotEmpty()) + listOf( + "*listOfNotNull(${ + optionParams.joinToString(",\n", "\n", "\n") { + "\t${it.name}?.let{ ${opClassSpec!!.canonicalName}.${it.name}(it) }" + } + }).toTypedArray()") + else emptyList()) + + append(paramStrings.joinToString(",\n", "\n", "\n").prependIndent("\t")) + + append(")") + }) + + val javadoc = buildOpMethodJavadoc(opClass, endpointMethod, describeByClass) + javadoc.addBlockTag("see", "${javaOpsClass.canonicalName}.$name") + + builder.addKdoc("%L", javadoc.toKDoc()) + + return builder.build() + } + + private fun makeCopyWithReified(method: FunSpec): FunSpec? { + + val dataTypeParameters = + method + .parameters + .mapNotNull { param -> + param.type.let { + if (it is ParameterizedTypeName && + it.rawType == T_CLASS && + it.typeArguments.singleOrNull() in method.typeVariables) + param to it.typeArguments.single() as TypeVariableName + else null + } + } + .toMap() + val builder = method.toBuilder() + + if (dataTypeParameters.isEmpty()) return null + + dataTypeParameters.values.forEach { + val i = builder.typeVariables.indexOf(it) + builder.typeVariables[i] = builder.typeVariables[i].copy(reified = true) + } + if (dataTypeParameters.isNotEmpty()) { + builder.addModifiers(KModifier.INLINE) + builder.addAnnotation( + AnnotationSpec.builder(JvmName::class).addMember("\"${method.name}Reified\"").build()) + } + + val paramString = + builder.parameters.joinToString { + if (it in dataTypeParameters) dataTypeParameters[it]!!.name + "::class.java" + else { + val name = if (it.name == "var") "`var`" else it.name + + if (KModifier.VARARG in it.modifiers) "*${name}" else name + } + } + + builder.parameters.removeAll(dataTypeParameters.keys) + + 
builder.clearBody() + + builder.addStatement( + "return ${method.name}<${builder.typeVariables.joinToString(", ") { it.name }}>($paramString)") + return builder.build() + } + + override fun buildGroupClass(spec: OpsSpec): TypeSpec { + + val builder = + TypeSpec.classBuilder(spec.className.kotlin) + .addKdoc( + """ + An API for building `%L` operations as [Op][%T]s + + @see %T + + """.trimIndent(), + spec.groupName, + T_OP.kotlin, + T_OPS.kotlin) + + builder.primaryConstructor( + FunSpec.constructorBuilder().addParameter("ops", T_KOTLIN_OPS).build()) + + val accessorName = + (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }) + .reversed() + .joinToString(".") + + builder.addProperty( + PropertySpec.builder("java", spec.className.kotlin) + .initializer("ops.java.$accessorName") + .build()) + + builder.addProperty( + PropertySpec.builder("ops", T_KOTLIN_OPS) + .initializer("ops") + .addKdoc("Get the parent [" + T_KOTLIN_OPS.simpleName + "] object.") + .build()) + + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("ops.scope") + .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) + .build()) + + addGroupFields(builder, spec.subGroups, false) + + builder.addFunctions(spec.methods.toKotlin(spec.className.kotlin)) + + return builder.build() + } + + override fun buildTopClass(spec: OpsSpec): TypeSpec { + val builder = + TypeSpec.classBuilder(T_KOTLIN_OPS) + .addKdoc( + """ + An API for building operations as [Op][%T]s + + @see %T + + """.trimIndent(), + T_OP.kotlin, + T_OPS.kotlin) + + builder.primaryConstructor( + FunSpec.constructorBuilder().addParameter("java", T_OPS.kotlin).build()) + builder.addProperty( + PropertySpec.builder("java", T_OPS.kotlin) + .initializer("java") + .addModifiers(KModifier.OVERRIDE) + .addKdoc("Returns the java counterpart of this API\n") + .build()) + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("java.scope()") + .addKdoc("Returns 
the current [scope][%T] of this API\n", T_SCOPE.kotlin) + .build()) + + builder.superclass(T_KOTLIN_OPS_BASE) + + addGroupFields(builder, spec.subGroups, true) + + builder.addFunctions(spec.methods.toKotlin(T_OPS.kotlin)) + + return builder.build() + } + + private fun addGroupFields( + classBuilder: TypeSpec.Builder, + groups: List, + isTopClass: Boolean, + ) = + groups.forEach { + val kotlinGroup = + ClassName(it.className.packageName() + ".kotlin", it.className.simpleNames()) + classBuilder.addProperty( + PropertySpec.builder(it.fieldName, kotlinGroup) + .initializer("%T(${if (isTopClass) "this" else "ops"})", kotlinGroup) + .build()) + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml new file mode 100644 index 00000000000..d98c3e0924c --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml @@ -0,0 +1,79 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.5.0-SNAPSHOT + + tensorflow-kotlin-jupyter + jar + + TensorFlow Kotlin Jupyter Integration + Kotlin Jupyter integration for tensorflow core and platform + + + + ${project.version} + + + + + org.jetbrains.kotlinx + kotlin-jupyter-api + ${kotlin_jupyter.version} + + + + + ${project.basedir}/src/main/kotlin + + + src/main/resources + true + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt new file mode 100644 index 00000000000..0e11a2ebb0e --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt @@ -0,0 
+1,50 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.jupyter + +import java.util.* +import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration + +private const val tensorflowPropertiesFile = "org/tensorflow/jupyter/tensorflow.properties" + +public class TensorflowKotlinIntegration : JupyterIntegration() { + override fun Builder.onLoaded() { + val os = System.getProperty("os.name").lowercase() + val ext = + when { + os.contains("mac") -> "macosx-x86_64" + os.startsWith("windows") -> "windows-x86_64" + else -> "linux-x86_64" + } + "-gpu" + + val version = + this@TensorflowKotlinIntegration.javaClass.classLoader.getResourceAsStream( + tensorflowPropertiesFile) + .let { + it + ?: error( + "No $tensorflowPropertiesFile resource found, can't determine the library version") + Properties().apply { load(it) }.getProperty("version") + ?: error( + "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") + } + + dependencies("org.tensorflow:tensorflow-core-api:jar:$ext:$version") + dependencies("org.tensorflow:tensorflow-core-kotlin-jupyter:$version") + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json 
b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json new file mode 100644 index 00000000000..02d41bcd2c4 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json @@ -0,0 +1,6 @@ +{ + "definitions":[], + "producers": [ + { "fqn" : "org.tensorflow.jupyter.TensorflowKotlinIntegration" } + ] +} \ No newline at end of file diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties new file mode 100644 index 00000000000..b775882198a --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties @@ -0,0 +1,18 @@ +# +# /* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ======================================================================= +# */ +# +version=${project.version} \ No newline at end of file diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml new file mode 100644 index 00000000000..d84c051b6d7 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml @@ -0,0 +1,49 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.5.0-SNAPSHOT + + tensorflow-kotlin + jar + + TensorFlow Kotlin Library + Kotlin API wrappers for the TensorFlow Java library + + + + + + + + org.tensorflow + tensorflow-core-kotlin + ${project.version} + + + org.tensorflow + tensorflow-framework-kotlin + ${project.version} + + +