
Commit e2b8a42

dmeoli authored and antmarakis committed
fixed deep learning .ipynb imports (#1123)
* changed queue to set in AC3 (as in the pseudocode of the original algorithm) to reduce the number of consistency checks caused by redundant arcs in the queue. For example, on the harder1 configuration of the Sudoku CSP, the number of consistency checks dropped from 40464 to 12562 (see the sketch after this list)
* re-added test commented out by mistake
* added the mentioned AC4 algorithm for constraint propagation: AC3 has non-optimal worst-case time complexity O(cd^3), while AC4 runs in O(cd^2) worst-case time
* added doctest in Sudoku for AC4 and the possibility of choosing the constraint propagation algorithm in mac inference
* removed useless doctest for AC4 in Sudoku because AC4's tests are already present in test_csp.py
* added map coloring SAT problems
* fixed typo errors and removed unnecessary brackets
* reformulated the map coloring problem
* Revert "reformulated the map coloring problem" (reverts commit 20ab0e5)
* Revert "fixed typo errors and removed unnecessary brackets" (reverts commit f743146)
* Revert "added map coloring SAT problems" (reverts commit 9e0fa55)
* Revert "removed useless doctest for AC4 in Sudoku because AC4's tests are already present in test_csp.py" (reverts commit b3cd24c)
* Revert "added doctest in Sudoku for AC4 and the possibility of choosing the constraint propagation algorithm in mac inference" (reverts commit 6986247)
* Revert "added the mentioned AC4 algorithm for constraint propagation" (reverts commit 03551fb)
* added map coloring SAT problem
* fixed build error
* Revert "added map coloring SAT problem" (reverts commit 93af259)
* Revert "fixed build error" (reverts commit 6641c2c)
* added map coloring SAT problem
* removed redundant parentheses
* added Viterbi algorithm
* added monkey & bananas planning problem
* simplified condition in search.py
* added tests for monkey & bananas planning problem
* removed monkey & bananas planning problem
* Revert "removed monkey & bananas planning problem" (reverts commit 9d37ae0)
* Revert "added tests for monkey & bananas planning problem" (reverts commit 24041e9)
* Revert "simplified condition in search.py" (reverts commit 6d229ce)
* Revert "added monkey & bananas planning problem" (reverts commit c74933a)
* defined PlanningProblem as a specialization of search.Problem & fixed typo errors
* fixed doctest in logic.py
* fixed doctest for cascade_distribution
* added ForwardPlanner and tests
* added __lt__ implementation for Expr
* added more tests
* renamed forward planner
* Revert "renamed forward planner" (reverts commit c4139e5)
* renamed forward planner class & added doc
* added backward planner and tests
* fixed mdp4e.py doctests
* removed ignore_delete_lists_heuristic flag
* fixed heuristic for forward and backward planners
* added SATPlan and tests
* fixed ignore delete lists heuristic in forward and backward planners
* fixed backward planner and added tests
* updated doc
* added n-ary CSP definition and examples
* added CSPlan and tests
* fixed CSPlan
* added book's cryptarithmetic puzzle example
* fixed typo errors in test_csp
* fixed #1111
* added sortedcontainers to yml and doc to CSPlan
* added tests for n-ary CSP
* fixed utils.extend
* updated test_probability.py
* converted static methods to functions
* added AC3b and AC4 with heuristic and tests
* added conflict-driven clause learning SAT solver
* added tests for cdcl and heuristics
* fixed probability.py
* fixed import
* fixed kakuro
* added Martelli and Montanari rule-based unification algorithm
* removed duplicate standardize_variables
* renamed variables shadowing built-in functions
* fixed typos in learning.py
* renamed some files and fixed typos
* fixed typos
* fixed typos
* fixed tests
* removed unify_mm
* removed unnecessary brackets
* fixed tests
* moved utility functions to utils.py
* fixed typos
* moved utils functions to utils.py, separated probability learning classes from learning.py, fixed typos and fixed imports in .ipynb files
* added missing learners
* fixed Travis build
* fixed typos
* fixed typos
* fixed typos
* fixed typos
* fixed typos in agents files
* fixed imports in agent files
* fixed deep learning .ipynb imports
* fixed typos
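The first item above is the one with a measurable payoff, so here is a minimal sketch of what the set-based AC-3 looks like. It assumes a hypothetical csp object exposing variables, neighbors, domains and a constraints(Xi, x, Xj, y) predicate; this is an illustration of the technique, not the repository's exact csp.py API:

    def AC3(csp):
        """Enforce arc consistency with AC-3, using a set (not a queue) as the
        worklist so the same arc is never scheduled twice."""
        worklist = {(Xi, Xk) for Xi in csp.variables for Xk in csp.neighbors[Xi]}
        while worklist:
            Xi, Xj = worklist.pop()  # a set silently merges re-added arcs
            if revise(csp, Xi, Xj):
                if not csp.domains[Xi]:
                    return False  # domain wipe-out: the CSP is inconsistent
                # only the arcs pointing at Xi need to be re-examined
                worklist |= {(Xk, Xi) for Xk in csp.neighbors[Xi] if Xk != Xj}
        return True

    def revise(csp, Xi, Xj):
        """Remove values from Xi's domain with no consistent value in Xj's."""
        revised = False
        for x in list(csp.domains[Xi]):
            if not any(csp.constraints(Xi, x, Xj, y) for y in csp.domains[Xj]):
                csp.domains[Xi].remove(x)
                revised = True
        return revised

Because a set cannot hold duplicates, an arc that is already pending is absorbed instead of enqueued again, which is where the reported 40464 to 12562 drop in consistency checks on the harder1 Sudoku comes from.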
1 parent 283fa41 · commit e2b8a42

File tree: 5 files changed (+14, −42 lines)


deep_learning4e.py (+10 −10; the paired −/+ lines differ only in trailing whitespace)
@@ -187,7 +187,7 @@ def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1,
     Gradient descent algorithm to update the learnable parameters of a network.
     :return: the updated network
     """
-    examples = dataset.examples  # init data
+    examples = dataset.examples  # init data
 
     for e in range(epochs):
         total_loss = 0
@@ -209,7 +209,7 @@ def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1,
 
         if verbose and (e + 1) % verbose == 0:
             print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
-
+
     return net
 
 
@@ -238,26 +238,26 @@ def adam_optimizer(dataset, net, loss, epochs=1000, rho=(0.9, 0.999), delta=1 /
         for batch in get_batch(examples, batch_size):
             t += 1
             inputs, targets = init_examples(batch, dataset.inputs, dataset.target, len(net[-1].nodes))
-
+
             # compute gradients of weights
             gs, batch_loss = BackPropagation(inputs, targets, weights, net, loss)
-
+
             # update s,r,s_hat and r_gat
             s = vector_add(scalar_vector_product(rho[0], s),
                            scalar_vector_product((1 - rho[0]), gs))
             r = vector_add(scalar_vector_product(rho[1], r),
                            scalar_vector_product((1 - rho[1]), element_wise_product(gs, gs)))
             s_hat = scalar_vector_product(1 / (1 - rho[0] ** t), s)
             r_hat = scalar_vector_product(1 / (1 - rho[1] ** t), r)
-
+
             # rescale r_hat
             r_hat = map_vector(lambda x: 1 / (math.sqrt(x) + delta), r_hat)
-
+
             # delta weights
             delta_theta = scalar_vector_product(-l_rate, element_wise_product(s_hat, r_hat))
             weights = vector_add(weights, delta_theta)
             total_loss += batch_loss
-
+
         # update the weights of network each batch
         for i in range(len(net)):
             if weights[i]:
@@ -266,7 +266,7 @@ def adam_optimizer(dataset, net, loss, epochs=1000, rho=(0.9, 0.999), delta=1 /
 
         if verbose and (e + 1) % verbose == 0:
             print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
-
+
     return net
 
 
@@ -405,7 +405,7 @@ def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, verbose=None):
 
     # initialize the network, add dense layer
     raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
-
+
     # update the network
     learned_net = gradient_descent(dataset, raw_net, mse_loss, epochs, l_rate=learning_rate, verbose=verbose)
 
@@ -478,7 +478,7 @@ def AutoencoderLearner(inputs, encoding_size, epochs=200):
     model.add(Dense(encoding_size, input_dim=input_size, activation='relu', kernel_initializer='random_uniform',
                     bias_initializer='ones'))
     model.add(Dense(input_size, activation='relu', kernel_initializer='random_uniform', bias_initializer='ones'))
-
+
     # update model with sgd
     sgd = optimizers.SGD(lr=0.01)
     model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
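For orientation, the adam_optimizer hunk above implements the standard Adam update. Writing g for the batch gradient returned by BackPropagation, eta for l_rate and t for the step counter, the code computes:

    s \leftarrow \rho_1 s + (1 - \rho_1)\, g
    r \leftarrow \rho_2 r + (1 - \rho_2)\, (g \odot g)
    \hat{s} = \frac{s}{1 - \rho_1^t}, \qquad \hat{r} = \frac{r}{1 - \rho_2^t}
    \theta \leftarrow \theta - \eta\, \frac{\hat{s}}{\sqrt{\hat{r}} + \delta}

The map_vector(lambda x: 1 / (math.sqrt(x) + delta), r_hat) line folds the 1/(sqrt(r_hat) + delta) factor into r_hat, so the final element_wise_product(s_hat, r_hat) scaled by -l_rate is exactly the theta update in the last equation.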

notebooks/chapter19/Learners.ipynb (+1 −8)
@@ -35,7 +35,7 @@
    "source": [
     "import os, sys\n",
     "sys.path = [os.path.abspath(\"../../\")] + sys.path\n",
-    "from DeepNeuralNet4e import *\n",
+    "from deep_learning4e import *\n",
     "from notebook4e import *\n",
     "from learning4e import *"
    ]
@@ -482,13 +482,6 @@
    "source": [
     "After the model converging, the model's error ratio on the training set is still high. We will introduce the convolutional network in the following chapters to see how it helps improve accuracy on learning this dataset."
    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
   }
  ],
  "metadata": {

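For reference, after this change the import preamble shared by the four chapter-19 notebooks reads as follows (only Learners.ipynb also pulls in learning4e):

    import os, sys
    sys.path = [os.path.abspath("../../")] + sys.path  # make the repo root importable
    from deep_learning4e import *  # renamed from DeepNeuralNet4e
    from notebook4e import *
    from learning4e import *  # Learners.ipynb only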
notebooks/chapter19/Loss Functions and Layers.ipynb (+1 −8)
@@ -116,7 +116,7 @@
    "source": [
     "import os, sys\n",
     "sys.path = [os.path.abspath(\"../../\")] + sys.path\n",
-    "from DeepNeuralNet4e import *\n",
+    "from deep_learning4e import *\n",
     "from notebook4e import *"
    ]
   },
@@ -372,13 +372,6 @@
    "source": [
     "We can see that each time kernel picks up the maximum value in its region."
    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
   }
  ],
  "metadata": {

notebooks/chapter19/Optimizer and Backpropagation.ipynb (+1 −8)
@@ -47,7 +47,7 @@
    "source": [
     "import os, sys\n",
     "sys.path = [os.path.abspath(\"../../\")] + sys.path\n",
-    "from DeepNeuralNet4e import *\n",
+    "from deep_learning4e import *\n",
     "from notebook4e import *"
    ]
   },
@@ -285,13 +285,6 @@
    "source": [
     "The demonstration of optimizers and back-propagation algorithm will be made together with neural network learners."
    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
   }
  ],
  "metadata": {

notebooks/chapter19/RNN.ipynb (+1 −8)
@@ -60,7 +60,7 @@
    "source": [
     "import os, sys\n",
     "sys.path = [os.path.abspath(\"../../\")] + sys.path\n",
-    "from DeepNeuralNet4e import *\n",
+    "from deep_learning4e import *\n",
     "from notebook4e import *"
    ]
   },
@@ -440,13 +440,6 @@
    "source": [
     "It shows we added two dense layers to the network structures."
    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
   }
  ],
  "metadata": {
