
Commit d2b1b89

committed
deep_learning
1 parent 4d18f16 commit d2b1b89

File tree

1 file changed: +49 -0 lines changed


Diff for: Deep_learning/propagate.py

# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 16:14:09 2018

@author: Administrator
"""

import numpy as np


def sigmoid(z):
    # Logistic function. The commit assumes sigmoid is available from elsewhere in the repo;
    # it is defined here so the file runs on its own.
    return 1 / (1 + np.exp(-z))


def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for logistic-regression propagation.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Returns:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b

    Tips:
    - Write your code step by step for the propagation. np.log(), np.dot()
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
    A = sigmoid(np.dot(X.T, w) + b)        # activation, shape (number of examples, 1)
    cost = -1 / m * (np.dot(Y, np.log(A)) + np.dot(1 - Y, np.log(1 - A)))  # cross-entropy cost
    ### END CODE HERE ###

    # BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
    dw = 1 / m * np.dot(X, A - Y.T)        # gradient w.r.t. w, same shape as w
    db = 1 / m * np.sum(A - Y.T)           # gradient w.r.t. b, a scalar
    ### END CODE HERE ###

    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)
    assert cost.shape == ()

    grads = {"dw": dw,
             "db": db}

    return grads, cost
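
For reference, a minimal sketch of how propagate could be called on toy data. The weights, bias, inputs, and labels below are illustrative assumptions (not values from this commit), chosen only so the shapes match the docstring.

import numpy as np

# Toy problem: 2 features, 3 examples; shapes follow the docstring
# (w is (n, 1), b is a scalar, X is (n, m), Y is (1, m)).
w = np.array([[1.0], [2.0]])
b = 2.0
X = np.array([[1.0, 2.0, -1.0],
              [3.0, 4.0, -3.2]])
Y = np.array([[1, 0, 1]])

grads, cost = propagate(w, b, X, Y)
print("dw =", grads["dw"])    # gradient with the same (2, 1) shape as w
print("db =", grads["db"])    # scalar gradient for the bias
print("cost =", cost)         # scalar cross-entropy cost

# In a training loop these gradients would feed a plain gradient-descent update, e.g.
#   w = w - learning_rate * grads["dw"]
#   b = b - learning_rate * grads["db"]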
