@@ -1,4 +1,3 @@
-import tensorflow as tf
 from keras import backend as K
 
 smooth = 1
@@ -38,32 +37,6 @@ def dice_loss_strict(y_true, y_pred):
     return -dice_strict(y_true, y_pred)
 
 
-def _to_tensor(x, dtype):
-    x = tf.convert_to_tensor(x)
-    if x.dtype != dtype:
-        x = tf.cast(x, dtype)
-    return x
-
-
-def _tf_bce(output, target, from_logits=False):
-    """Workaround for a Keras bug with the latest TensorFlow."""
-
-    # Note: tf.nn.sigmoid_cross_entropy_with_logits
-    # expects logits, Keras expects probabilities.
-    if not from_logits:
-        # Transform the probabilities back to logits.
-        epsilon = _to_tensor(K.epsilon(), output.dtype.base_dtype)
-        output = tf.clip_by_value(output, epsilon, 1 - epsilon)
-        output = tf.log(output / (1 - output))
-    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
-
-
-def bce(y_true, y_pred):
-    # Workaround for a shape-inference bug.
-    y_true.set_shape(y_pred.get_shape())
-    return K.mean(_tf_bce(y_pred, y_true), axis=-1)
-
-
 # Sanity check loss functions.
 if __name__ == "__main__":
     import numpy as np
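The `smooth = 1` constant kept at the top of the file feeds the Dice coefficient whose strict variant the second hunk touches. The file's own `dice_strict` lies outside the shown hunks, so the sketch below is only a common smoothed formulation of the idea, not the repository's code (`dice_coef` is an illustrative name):

from keras import backend as K

smooth = 1.0

def dice_coef(y_true, y_pred):
    # Flatten both masks so overlap is measured across every pixel at once.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # smooth keeps the ratio (and its gradient) finite when both masks are empty.
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)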
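The removed `_tf_bce` worked by mapping probabilities back to logits, clipping them away from 0 and 1 and applying the log-odds transform, so that TensorFlow's numerically stable cross-entropy primitive could be called. A minimal standalone sketch of that trick against the current TensorFlow 2.x API (`tf.math.log` in place of the old `tf.log`; `bce_from_probs` is a hypothetical name, not the repository's):

import tensorflow as tf

def bce_from_probs(y_true, y_pred, eps=1e-7):
    # Clip so the log-odds transform below stays finite at 0 and 1.
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
    # logit(p) = log(p / (1 - p)) inverts the sigmoid, recovering logits.
    logits = tf.math.log(y_pred / (1.0 - y_pred))
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logits)

print(bce_from_probs(tf.constant([0.0, 1.0, 1.0]),
                     tf.constant([0.1, 0.8, 0.99])).numpy())

Keras's own K.binary_crossentropy applies the same epsilon clipping internally, which is presumably why the local workaround could be dropped.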