From 1d2be2c7071562c551d6235ae2da18123a66d479 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Mon, 1 Jul 2019 10:27:25 +0200
Subject: [PATCH] Use print() function in both Python 2 and Python 3
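
Python 2's print statement was removed in Python 3 in favor of the
print() function. Adding `from __future__ import print_function` at the
top of each file makes print() available on Python 2 as well, so the
same calls run unchanged under both interpreters. A minimal,
self-contained sketch of the pattern applied throughout this patch (the
printed values here are illustrative only, not taken from the files
below):

    from __future__ import print_function  # no-op on Python 3

    # Before (Python 2 only):
    #     print "series length:", 100
    # After (both Python 2 and Python 3):
    print("series length:", 100)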
"cost:", cost, "time for epoch:", (datetime.now() - t0)) if (i+1) % 500 == 0: learning_rate /= 10 costs.append(cost) @@ -141,7 +142,7 @@ def predict(self, X): X[:,d] = series[d:d+n] Y = series[D:D+n] - print "series length:", n + print("series length:", n) Xtrain = X[:n/2] Ytrain = Y[:n/2] Xtest = X[n/2:] @@ -154,8 +155,8 @@ def predict(self, X): model = RNN([50]) model.fit(Xtrain, Ytrain, activation=T.tanh) - print "train score:", model.score(Xtrain, Ytrain) - print "test score:", model.score(Xtest, Ytest) + print("train score:", model.score(Xtrain, Ytrain)) + print("test score:", model.score(Xtest, Ytest)) # plot the prediction with true values plt.plot(series) diff --git a/bayesian_ml/1/nb.py b/bayesian_ml/1/nb.py index 73634cf9..b7f6b1bc 100644 --- a/bayesian_ml/1/nb.py +++ b/bayesian_ml/1/nb.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Naive Bayes with prior on mean and precision of Gaussian # mean | precision ~ N(0, c / precision) # precision ~ Gamma(a, b) @@ -120,15 +121,15 @@ def plot_image(x, Q, title): Ytest = pd.read_csv('ytest.csv', header=None).as_matrix().flatten() model = NB() model.fit(Xtrain, Ytrain) - print "train accuracy:", model.score(Xtrain, Ytrain) - print "test accuracy:", model.score(Xtest, Ytest) + print("train accuracy:", model.score(Xtrain, Ytrain)) + print("test accuracy:", model.score(Xtest, Ytest)) # confusion matrix M = model.confusion_matrix(Xtest, Ytest) - print "confusion matrix:" - print M - print "N:", len(Ytest) - print "sum(M):", M.sum() + print("confusion matrix:") + print(M) + print("N:", len(Ytest)) + print("sum(M):", M.sum()) # plot 3 misclassified Q = pd.read_csv('Q.csv', header=None).as_matrix() diff --git a/bayesian_ml/2/em.py b/bayesian_ml/2/em.py index ad958963..d8a6a041 100644 --- a/bayesian_ml/2/em.py +++ b/bayesian_ml/2/em.py @@ -1,3 +1,4 @@ +from __future__ import print_function # expectation-maximization for the model: # x(n) ~ N(Wz(n), sigma**2 I) (observed variables) # z(n) ~ N(0, I) (latent variables) @@ -61,9 +62,9 @@ def loglikelihood(X, Z, W): plt.plot(costs) plt.show() -print "actual W:", W0 -print "predicted W:", W +print("actual W:", W0) +print("predicted W:", W) -print "log-likelihood given real W:", loglikelihood(X, Z, W0) +print("log-likelihood given real W:", loglikelihood(X, Z, W0)) -print "log-likelihood found:", costs[-1] +print("log-likelihood found:", costs[-1]) diff --git a/bayesian_ml/2/probit.py b/bayesian_ml/2/probit.py index a1cfc379..9bb3bf17 100644 --- a/bayesian_ml/2/probit.py +++ b/bayesian_ml/2/probit.py @@ -1,3 +1,4 @@ +from __future__ import print_function # probit regression import numpy as np @@ -103,15 +104,15 @@ def plot_image(x, Q, title): Ytest = pd.read_csv('ytest.csv', header=None).as_matrix().flatten() model = ProbitRegression() model.fit(Xtrain, Ytrain) - print "train accuracy:", model.score(Xtrain, Ytrain) - print "test accuracy:", model.score(Xtest, Ytest) + print("train accuracy:", model.score(Xtrain, Ytrain)) + print("test accuracy:", model.score(Xtest, Ytest)) # confusion matrix M = model.confusion_matrix(Xtest, Ytest) - print "confusion matrix:" - print M - print "N:", len(Ytest) - print "sum(M):", M.sum() + print("confusion matrix:") + print(M) + print("N:", len(Ytest)) + print("sum(M):", M.sum()) # plot 3 misclassified Q = pd.read_csv('Q.csv', header=None).as_matrix() diff --git a/bayesian_ml/3/run.py b/bayesian_ml/3/run.py index 72d9d8ee..3ec9c3bb 100644 --- a/bayesian_ml/3/run.py +++ b/bayesian_ml/3/run.py @@ -1,3 +1,4 @@ +from __future__ import print_function # 
 # variational-inference for linear regression
 # y(i) ~ N( x(i).dot(w), 1/lambda )
 # w ~ N( 0, diag(alpha_1, alpha_2, ..., alpha_D)^-1 )
@@ -81,7 +82,7 @@ def run(num=1, T=500):
     Y = pd.read_csv('y_set%s.csv' % num, header=None).as_matrix().flatten()
     Z = pd.read_csv('z_set%s.csv' % num, header=None).as_matrix().flatten()
     N, D = X.shape
-    print X.shape, Y.shape, Z.shape
+    print(X.shape, Y.shape, Z.shape)
 
     a0 = 1e-16
     b0 = 1e-16
@@ -129,16 +130,16 @@ def run(num=1, T=500):
         # update L
         L[t] = objective(X, Y, C, mu, a, b, e, f, a0, b0, e0, f0)
         if t % 20 == 0:
-            print "t:", t
+            print("t:", t)
             if num == 3:
-                print "L:", L[t]
+                print("L:", L[t])
 
     # plot 1/E[alpha]
     plt.plot(b/a)
     plt.show()
 
     # 1/E[lambda]
-    print "1/E[lambda]:", f/e
+    print("1/E[lambda]:", f/e)
 
     # plot L
     plt.plot(L)
diff --git a/bayesian_ml/4/emgmm.py b/bayesian_ml/4/emgmm.py
index 42d1f1f4..0d78de93 100644
--- a/bayesian_ml/4/emgmm.py
+++ b/bayesian_ml/4/emgmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # GMM using Expectation-Maximization
 
 import numpy as np
@@ -54,9 +55,9 @@ def gmm(X, K, max_iter=20, smoothing=1e-2):
     plt.scatter(X[:,0], X[:,1], c=R.argmax(axis=1))
     plt.show()
 
-    print "pi:", pi
-    print "means:", M
-    print "covariances:", C
+    print("pi:", pi)
+    print("means:", M)
+    print("covariances:", C)
     return R
 
 
diff --git a/bayesian_ml/4/npbgmm.py b/bayesian_ml/4/npbgmm.py
index ceb977af..f0cb771c 100644
--- a/bayesian_ml/4/npbgmm.py
+++ b/bayesian_ml/4/npbgmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # GMM using Bayesian Nonparametric Clustering
 # Gaussian Mixture Model
 # Dirichlet Process
@@ -96,7 +97,7 @@ def gmm(X, T=500):
     observations_per_cluster = np.zeros((T, 6))
     for t in xrange(T):
         if t % 20 == 0:
-            print t
+            print(t)
         # 1) calculate phi[i,j]
         # Notes:
         # MANY new clusters can be made each iteration
diff --git a/bayesian_ml/4/vigmm.py b/bayesian_ml/4/vigmm.py
index 95887b62..e2a68bec 100644
--- a/bayesian_ml/4/vigmm.py
+++ b/bayesian_ml/4/vigmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # GMM using Variational Inference
 
 import numpy as np
@@ -199,7 +200,7 @@ def gmm(X, K, max_iter=100):
     plt.title("Costs")
     plt.show()
 
-    print "cluster assignments:\n", cluster_assignments
+    print("cluster assignments:\n", cluster_assignments)
 
     plt.scatter(X[:,0], X[:,1], c=cluster_assignments, s=100, alpha=0.7)
     plt.show()
diff --git a/best_fit_line.py b/best_fit_line.py
index 5f71dbc4..f1a4e08e 100644
--- a/best_fit_line.py
+++ b/best_fit_line.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 from pulp import *
 
 ### remove variable b because it is unconstrained
@@ -39,12 +40,12 @@
     prob += (a*x - y - c >= -z)
 
 status = prob.solve(GLPK(msg = 0))
-print "status:", LpStatus[status]
-print "values:"
-print "\ta:", value(a)
+print("status:", LpStatus[status])
+print("values:")
+print("\ta:", value(a))
 # print "\tb:", value(b)
-print "\tc:", value(c)
-print "\tz:", value(z)
+print("\tc:", value(c))
+print("\tz:", value(z))
 
 
 # extra part to plot everything
diff --git a/cnn_class/cifar.py b/cnn_class/cifar.py
index cf7d4025..298532ad 100644
--- a/cnn_class/cifar.py
+++ b/cnn_class/cifar.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # https://deeplearningcourses.com/c/deep-learning-convolutional-neural-networks-theano-tensorflow
 import os
 import numpy as np
@@ -43,7 +44,7 @@ def getImageData():
             im = Image.open("../large_files/cifar10/train/%s.png" % (i + 1))
             X[i] = image2array(im)
             if i % 1000 == 0:
-                print i
+                print(i)
         np.save(savedXpath, X.astype(np.uint8))
     else:
         X = np.load(savedXpath)
@@ -62,7 +63,7 @@ def getImageData():
                 idx += 1
             Y[i] = label2idx[s]
             i += 1
 
-    print "done loading data"
+    print("done loading data")
     X, Y = shuffle(X, Y)
     return X[:30000], Y[:30000]
@@ -202,7 +203,7 @@ def fit(self, X, Y, lr=1e-4, mu=0.99, reg=1e-6, decay=0.99999, eps=1e-2, batch_s
                 c, p = cost_predict_op(Xvalid, Yvalid)
                 costs.append(c)
                 e = error_rate(Yvalid, p)
-                print "i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e
+                print("i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e)
 
         if show_fig:
             plt.plot(costs)
diff --git a/cnn_class2/tf_resnet_convblock_starter.py b/cnn_class2/tf_resnet_convblock_starter.py
index 4c61019b..0758ad13 100644
--- a/cnn_class2/tf_resnet_convblock_starter.py
+++ b/cnn_class2/tf_resnet_convblock_starter.py
@@ -31,5 +31,5 @@ def predict(self, X):
     conv_block.session = session
     session.run(init)
 
-    output = conv_block.predict(X):
+    output = conv_block.predict(X)
     print("output.shape:", output.shape)
\ No newline at end of file
diff --git a/hmm_class/generate_ht.py b/hmm_class/generate_ht.py
index 62a8742a..f85e128a 100644
--- a/hmm_class/generate_ht.py
+++ b/hmm_class/generate_ht.py
@@ -35,9 +35,9 @@ def main():
     for n in range(50):
         sequence = generate_sequence(30)
         sequence = ''.join(symbol_map[s] for s in sequence)
-        print sequence
+        print(sequence)
         f.write("%s\n" % sequence)
 
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
diff --git a/hmm_class/sites.py b/hmm_class/sites.py
index 617863f5..5b334926 100644
--- a/hmm_class/sites.py
+++ b/hmm_class/sites.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # https://deeplearningcourses.com/c/unsupervised-machine-learning-hidden-markov-models-in-python
 # https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python
 # http://lazyprogrammer.me
@@ -19,14 +20,14 @@
     transitions[k] = v / row_sums[s]
 
 # initial state distribution
-print "initial state distribution:"
+print("initial state distribution:")
 for k, v in transitions.iteritems():
     s, e = k
     if s == '-1':
-        print e, v
+        print(e, v)
 
 # which page has the highest bounce?
 for k, v in transitions.iteritems():
     s, e = k
     if e == 'B':
-        print "bounce rate for %s: %s" % (s, v)
+        print("bounce rate for %s: %s" % (s, v))
diff --git a/numpy_class/dot_for.py b/numpy_class/dot_for.py
index 2e093ff2..320d81da 100644
--- a/numpy_class/dot_for.py
+++ b/numpy_class/dot_for.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python
 # https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python
 import numpy as np
@@ -23,4 +24,4 @@ def slow_dot_product(a, b):
     a.dot(b)
 dt2 = datetime.now() - t0
 
-print "dt1 / dt2:", dt1.total_seconds() / dt2.total_seconds()
\ No newline at end of file
+print("dt1 / dt2:", dt1.total_seconds() / dt2.total_seconds())
\ No newline at end of file
diff --git a/numpy_class/manual_data_loading.py b/numpy_class/manual_data_loading.py
index e30e425f..a8c28c26 100644
--- a/numpy_class/manual_data_loading.py
+++ b/numpy_class/manual_data_loading.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python
 # https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python
 
@@ -16,4 +17,4 @@
     X.append(sample)
 
 X = np.array(X)
-print X
\ No newline at end of file
+print(X)
\ No newline at end of file