Commit 909a8b7

Update packages to the latest versions (Check README.md)

1 parent 00d4418 commit 909a8b7

File tree: 11 files changed (+15 -15 lines)

Diff for: 04 - Neural Network Basic/02 - Deep NN.py (+1 -1)

@@ -44,7 +44,7 @@
 # Using the cross entropy function that TensorFlow provides out of the box,
 # the cost function for optimization can be applied simply, without writing out the formula.
 cost = tf.reduce_mean(
-    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
+    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))

 optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
 train_op = optimizer.minimize(cost)
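For context on the change itself: `_v2` differs from the deprecated op in a single respect, namely that gradients also flow into `labels`, which the old op treated as constant. With placeholder labels, as in all the scripts touched here, the two are numerically identical. If the labels were themselves produced by the graph, the old behavior could be recovered with `tf.stop_gradient`; a minimal sketch (the shapes below are illustrative, not the script's real ones):

```python
import tensorflow as tf

Y = tf.placeholder(tf.float32, [None, 10])       # one-hot labels
logits = tf.placeholder(tf.float32, [None, 10])  # model output

# Wrapping labels in stop_gradient makes _v2 behave exactly like the
# deprecated op: labels are treated as constants during backpropagation.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=tf.stop_gradient(Y), logits=logits))
```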

Diff for: 05 - TensorBoard, Saver/01 - Saver.py (+1 -1)

@@ -33,7 +33,7 @@
 model = tf.matmul(L2, W3)

 cost = tf.reduce_mean(
-    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
+    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))

 optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
 # The variable passed in as global_step is incremented by one each time the training variables are optimized.

Diff for: 05 - TensorBoard, Saver/02 - TensorBoard.py (+1 -1)

@@ -34,7 +34,7 @@

 with tf.name_scope('optimizer'):
     cost = tf.reduce_mean(
-        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
+        tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))

     optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
     train_op = optimizer.minimize(cost, global_step=global_step)
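Since this cost already lives inside a `name_scope` for TensorBoard, the updated loss can also be charted with a scalar summary. A sketch, assuming the script's usual `tf.summary.merge_all()` and `FileWriter` boilerplate elsewhere:

```python
with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(cost, global_step=global_step)
    # Record the cost at every step so it shows up as a TensorBoard chart.
    tf.summary.scalar('cost', cost)
```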

Diff for: 05 - TensorBoard, Saver/03 - TensorBoard2.py (+1 -1)

@@ -37,7 +37,7 @@

 with tf.name_scope('optimizer'):
     cost = tf.reduce_mean(
-        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
+        tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))

     optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
     train_op = optimizer.minimize(cost, global_step=global_step)

Diff for: 06 - MNIST/01 - MNIST.py (+1 -1)

@@ -32,7 +32,7 @@
 # The final model output is multiplied by the W3 variable so that it has 10 classes.
 model = tf.matmul(L2, W3)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))
 optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

 #########
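An aside, not part of this commit: when labels are integer class indices rather than one-hot vectors, the sparse variant of the same op skips the one-hot encoding entirely. A hypothetical sketch (`Y_sparse` is not a variable in the script):

```python
# Integer labels in [0, 10) instead of one-hot vectors.
Y_sparse = tf.placeholder(tf.int64, [None])
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=Y_sparse, logits=model))
```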

Diff for: 06 - MNIST/02 - Dropout.py (+1 -1)

@@ -26,7 +26,7 @@
 W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))
 model = tf.matmul(L2, W3)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))
 optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

 #########

Diff for: 07 - CNN/01 - CNN.py (+1 -1)

@@ -48,7 +48,7 @@
 W4 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))
 model = tf.matmul(L3, W4)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))
 optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
 # Try switching the optimizer to RMSPropOptimizer and compare the results.
 # optimizer = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)

Diff for: 07 - CNN/02 - tf.layers.py (+1 -1)

@@ -32,7 +32,7 @@

 model = tf.layers.dense(L3, 10, activation=None)

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))
 optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

 #########

Diff for: 10 - RNN/01 - MNIST.py (+1 -1)

@@ -54,7 +54,7 @@
 outputs = outputs[-1]
 model = tf.matmul(outputs, W) + b

-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))
 optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

 #########

Diff for: 11 - Inception/retrain.py (+1 -1)

@@ -722,7 +722,7 @@ def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
   tf.summary.histogram('activations', final_tensor)

   with tf.name_scope('cross_entropy'):
-    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
+    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
         labels=ground_truth_input, logits=logits)
   with tf.name_scope('total'):
     cross_entropy_mean = tf.reduce_mean(cross_entropy)
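As an aside, `tf.losses.softmax_cross_entropy` wraps the same op and reduces to a scalar by default, so the two `name_scope` blocks above could collapse into one. A sketch of that alternative, not what the commit actually does:

```python
with tf.name_scope('cross_entropy'):
    # Reduces to a scalar mean by default; no separate tf.reduce_mean needed.
    cross_entropy_mean = tf.losses.softmax_cross_entropy(
        onehot_labels=ground_truth_input, logits=logits)
```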

Diff for: README.md (+5 -5)

@@ -14,11 +14,11 @@

 ## Requirements

-- TensorFlow >= 1.2
-- Python >= 3.6
-- numpy >= 1.12
-- matplotlib >= 2.0
-- pillow >= 4.1
+- TensorFlow >= 1.8.0
+- Python >= 3.6.1
+- numpy >= 1.14.3
+- matplotlib >= 2.2.2
+- pillow >= 5.1

 ## Golbin's 3-Minute Deep Learning