
Commit 12f56df

Update generation
Signed-off-by: Ryan Nett <[email protected]>
1 parent 42c61b1 commit 12f56df

File tree: 4 files changed, +11 -166 lines


tensorflow-core/tensorflow-core-api/pom.xml (+1 -1)

@@ -20,7 +20,7 @@
     <javacpp.parser.skip>${native.build.skip}</javacpp.parser.skip>
     <javacpp.compiler.skip>${native.build.skip}</javacpp.compiler.skip>
     <java.module.name>org.tensorflow.core.api</java.module.name>
-    <ndarray.version>0.3.3</ndarray.version>
+    <ndarray.version>0.4.0-SNAPSHOT</ndarray.version>
     <truth.version>1.0.1</truth.version>
   </properties>

tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java (+6 -73)

@@ -1811,55 +1811,6 @@ public <T extends TNumber> Selu<T> selu(Operand<T> features) {
     return Selu.create(scope, features);
   }

-  /**
-   * Computes sigmoid cross entropy given <code>logits</code>.
-   *
-   * <p>Measures the probability error in discrete classification tasks in which each class is
-   * independent and not mutually exclusive. For instance, one could perform multilabel
-   * classification where a picture can contain both an elephant and a dog at the same time.
-   *
-   * <p>For brevity, let <code>x = logits</code>, <code>z = labels</code>. The logistic loss in
-   * pseudo-code is
-   *
-   * <pre>
-   * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-   *  = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
-   *  = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
-   *  = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
-   *  = (1 - z) * x + log(1 + exp(-x))
-   *  = x - x * z + log(1 + exp(-x))
-   * </pre>
-   *
-   * <p>For <code>x < 0</code>, to avoid overflow in <code>exp(-x)</code>, we reformulate the above
-   *
-   * <pre>
-   * x - x * z + log(1 + exp(-x))
-   *  = log(exp(x)) - x * z + log(1 + exp(-x))
-   *  = - x * z + log(1 + exp(x))
-   * </pre>
-   *
-   * <p>Hence, to ensure stability and avoid overflow, the implementation uses this equivalent
-   * formulation
-   *
-   * <pre>
-   * max(x, 0) - x * z + log(1 + exp(-abs(x)))
-   * </pre>
-   *
-   * <p><code>logits</code> and <code>labels</code> must have the same type and shape.
-   *
-   * @param labels the labels
-   * @param logits the logits of type float32 or float64
-   * @param <T> the type of labels and logits
-   * @return the component-wise logistic losses.
-   * @throws IllegalArgumentException if logits and labels do not have the same shape
-   */
-  public <T extends TNumber> Operand<T> sigmoidCrossEntropyWithLogits(Operand<T> labels,
-      Operand<T> logits) {
-    return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits);
-  }
-
   /**
    * Computes softmax activations.
    * For each batch {@code i} and class {@code j} we have
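The Javadoc deleted above derives the numerically stable form of the sigmoid cross-entropy loss, max(x, 0) - x * z + log(1 + exp(-abs(x))). As a standalone illustration of that formula (a plain-Java scalar sketch for reference only; it is not part of this change and not the TensorFlow op):

```java
// Per-element logistic loss for logit x and label z in {0, 1}.
// The naive form z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
// overflows in exp(-x) for large negative x, so we use the equivalent
// stable form derived in the removed Javadoc.
static double sigmoidCrossEntropy(double x, double z) {
  return Math.max(x, 0) - x * z + Math.log(1 + Math.exp(-Math.abs(x)));
}
```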
@@ -2084,30 +2035,12 @@ public <T extends TType> SpaceToDepth<T> spaceToDepth(Operand<T> input, Long blo
    * given row.
    * <p>Inputs are the logits, not probabilities.
    *
-   * <p>This op expects unscaled logits, since it performs a <code>softmax</code> on <code>logits
-   * </code> internally for efficiency. Do not call this op with the output of <code>softmax</code>,
-   * as it will produce incorrect results.
-   *
-   * <p>A common use case is to have logits of shape <code>[batchSize, numClasses]</code> and have
-   * labels of shape <code>[batchSize]</code>, but higher dimensions are supported, in which case
-   * the <code>dim</code>-th dimension is assumed to be of size <code>numClasses</code>. <code>
-   * logits</code> must have the <code>dataType</code> of <code>TFloat16</code>, <code>TFloat32</code>
-   * , or <code>TFloat64</code>, and <code>labels</code> must have the dtype of <code>TInt32</code>
-   * or <code>TInt64</code>.
-   *
-   * @param labels <code>Tensor</code> of shape <code>[d_0, d_1, ..., d_{r-1}]</code> (where <code>r
-   * </code> is rank of <code>labels</code> and result) and the dataType is <code>TInt32</code>
-   * or <code>TInt64</code>. Each entry in <code>labels</code> must be an index in <code>[0,
-   * numClasses)</code>. Other values will raise an exception when this op is run on CPU, and
-   * return <code>NaN</code> for corresponding loss and gradient rows on GPU.
-   * @param logits Per-label activations (typically a linear output) of shape <code>[d_0, d_1, ...,
-   * d_{r-1}, numClasses]</code> and dataType of <code>TFloat16</code>, <code>TFloat32</code>,
-   * or <code>TFloat64</code>. These activation energies are interpreted as unnormalized log
-   * probabilities.
-   * @return A <code>Tensor</code> of the same shape as <code>labels</code> and of the same type as
-   * <code>logits</code> with the softmax cross entropy loss.
-   * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank
-   * of the labels is not equal to the rank of the logits minus one.
+   * @param <T> data type for {@code loss} output
+   * @param features batch_size x num_classes matrix
+   * @param labels batch_size vector with values in [0, num_classes).
+   * This is the label for the given minibatch entry.
+   * @param <T> data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands
+   * @return a new instance of SparseSoftmaxCrossEntropyWithLogits
    */
   public <T extends TNumber> SparseSoftmaxCrossEntropyWithLogits<T> sparseSoftmaxCrossEntropyWithLogits(
       Operand<T> features, Operand<? extends TNumber> labels) {
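The trimmed Javadoc above still states the contract: features is a batch_size x num_classes matrix of logits, labels is a batch_size vector of class indices, and the returned op exposes a loss output. A usage sketch under those assumptions; the Ops.create(Graph) and constant(...) array overloads, the import path of the result class, and the loss() accessor are taken from the standard generated API rather than from this diff:

```java
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class SparseXentSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // batch_size x num_classes logits, and one class index per batch entry
      Operand<TFloat32> features =
          tf.constant(new float[][] {{2.0f, 0.5f, -1.0f}, {0.1f, 1.2f, 0.3f}});
      Operand<TInt32> labels = tf.constant(new int[] {0, 1});
      SparseSoftmaxCrossEntropyWithLogits<TFloat32> xent =
          tf.nn.sparseSoftmaxCrossEntropyWithLogits(features, labels);
      // xent.loss() is a batch_size vector holding the per-example loss
    }
  }
}
```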

tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java (+3 -77)

@@ -371,10 +371,10 @@ public final class Ops implements WithOps {

   public final TpuOps tpu;

-  public final AudioOps audio;
-
   public final MathOps math;

+  public final AudioOps audio;
+
   public final SignalOps signal;

   public final TrainOps train;

@@ -400,8 +400,8 @@ public final class Ops implements WithOps {
     sparse = new SparseOps(this);
     bitwise = new BitwiseOps(this);
     tpu = new TpuOps(this);
-    audio = new AudioOps(this);
     math = new MathOps(this);
+    audio = new AudioOps(this);
     signal = new SignalOps(this);
     train = new TrainOps(this);
     quantization = new QuantizationOps(this);

@@ -2996,80 +2996,6 @@ public <T extends TType> ImmutableConst<T> immutableConst(Class<T> dtype, Shape
     return ImmutableConst.create(scope, dtype, shape, memoryRegionName);
   }

-  /**
-   * Factory method to create an operation executing all initializers of a graph.
-   *
-   * <p>All initializers added to a graph via
-   * {@link org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd} are grouped together as a single
-   * unit of computation in the graph. This operation must then be added to any graph using one or
-   * more {@link Variable variables} and executed once before running the graph so the variable
-   * states are initialized properly.</p>
-   *
-   * <p>When the graph is built by the same process that is running the session, the initializers
-   * can be invoked by executing this single endpoint. For example:</p>
-   * <pre>{@code
-   * try (Graph g = new Graph()) {
-   *   Variable<TInt32> x = tf.variable(tf.constant(10));  // initAdd is called implicitly
-   *   Variable<TInt32> y = tf.variable(tf.constant(20));  // idem
-   *   Add<TInt32> z = tf.math.add(x, y);
-   *
-   *   try (Session s = new Session(g)) {
-   *     s.run(tf.init());  // initialize all variables
-   *
-   *     try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) {
-   *       assertEquals(30, t.data().getInt());
-   *     }
-   *   }
-   * }
-   * }</pre>
-   *
-   * <p>When the graph is built by a separate process, the initializers can be invoked by running
-   * the init op by its name, which defaults to {@link org.tensorflow.op.core.Init#DEFAULT_NAME}.
-   * For example:</p>
-   * <pre>{@code
-   * // Building the model
-   * try (Graph g = new Graph()) {
-   *   Variable<TInt32> x = tf.variable(tf.constant(10));  // initAdd is called implicitly
-   *   Variable<TInt32> y = tf.variable(tf.constant(20));  // idem
-   *   Add<TInt32> z = tf.withName("z").math.add(x, y);
-   *
-   *   tf.init();  // add variables initializers to the graph, as Init.DEFAULT_NAME
-   *   // ...exporting graph as a saved model...
-   * }
-   *
-   * ...
-   *
-   * // Running the model
-   * try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) {
-   *   model.session().run(Init.DEFAULT_NAME);
-   *
-   *   try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) {
-   *     assertEquals(30, t.data().getInt());
-   *   }
-   * }
-   * }</pre>
-   *
-   * @return an op grouping all initializers added to the graph
-   * @throws IllegalArgumentException if the execution environment in scope is not a graph
-   */
-  public Init init() {
-    return Init.create(scope);
-  }
-
-  /**
-   * Register an op as an initializer of the graph.
-   *
-   * <p>Registered initializers are then grouped as a single unit of computation by adding
-   * and executing an {@link org.tensorflow.op.core.Init#create(Scope) init} operation from a graph
-   * session. This is a no-op if executed in an eager session.
-   *
-   * @param initializer
-   * @see org.tensorflow.op.core.Init#create(Scope) init
-   */
-  public void initAdd(Op initializer) {
-    Init.add(scope, initializer);
-  }
-
   /**
    * Table initializer that takes two tensors for keys and values respectively.
   *
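The block removed above drops the init() and initAdd() convenience wrappers from the generated Ops class. The core Init op referenced in that Javadoc (org.tensorflow.op.core.Init, with its create(Scope) and add(Scope, Op) factories) is not touched by this diff, so the equivalent direct calls would look roughly like the sketch below; Ops.create(Graph) and Ops#scope() are assumed from the standard API rather than shown here:

```java
import org.tensorflow.Graph;
import org.tensorflow.Session;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Init;
import org.tensorflow.op.core.Variable;
import org.tensorflow.types.TInt32;

public class InitSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // tf.variable(...) still registers its initializer implicitly (per the removed Javadoc)
      Variable<TInt32> x = tf.variable(tf.constant(10));
      Variable<TInt32> y = tf.variable(tf.constant(20));

      // Group the registered initializers and run them once, in place of s.run(tf.init())
      Init init = Init.create(tf.scope());
      try (Session s = new Session(g)) {
        s.run(init);
      }
    }
  }
}
```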

tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java (+1 -15)

@@ -15,14 +15,12 @@
  */
 package org.tensorflow.processor.operator;

-import com.squareup.javapoet.ArrayTypeName;
-import com.squareup.javapoet.ClassName;
 import com.squareup.javapoet.FieldSpec;
 import com.squareup.javapoet.JavaFile;
 import com.squareup.javapoet.MethodSpec;
 import com.squareup.javapoet.TypeSpec;
+import com.squareup.javapoet.TypeVariableName;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.List;
 import javax.lang.model.element.Modifier;
 import org.tensorflow.Names;

@@ -230,18 +228,6 @@ protected TypeSpec buildTopClass(OpsSpec spec) {
             .addJavadoc("{@inheritDoc}")
             .build());

-    opsBuilder.addMethod(
-        MethodSpec.methodBuilder("withControlDependencies")
-            .addModifiers(Modifier.PUBLIC)
-            .addAnnotation(Override.class)
-            .addParameter(ArrayTypeName.of(Names.Op), "controls")
-            .varargs()
-            .returns(Names.Ops)
-            .addStatement(
-                "return withControlDependencies($T.asList(controls))", ClassName.get(Arrays.class))
-            .addJavadoc("{@inheritDoc}")
-            .build());
-
     opsBuilder.addMethod(
         MethodSpec.methodBuilder("withControlDependencies")
             .addModifiers(Modifier.PUBLIC)
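For reference, the JavaPoet block removed above generated a varargs withControlDependencies overload on the Ops class that simply delegated to the collection-based overload kept below it. Reconstructed from those builder calls (a sketch of what the generator used to emit, not copied from the old generated source), it amounted to:

```java
// Previously emitted onto the generated org.tensorflow.op.Ops class;
// Op is org.tensorflow.op.Op and Arrays is java.util.Arrays.
@Override
public Ops withControlDependencies(Op... controls) {
  // Delegates to the remaining withControlDependencies overload.
  return withControlDependencies(Arrays.asList(controls));
}
```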
