408 | 408 | "stacked_ae.compile(loss=\"binary_crossentropy\",\n",
409 | 409 | " optimizer=keras.optimizers.SGD(lr=1.5), metrics=[rounded_accuracy])\n",
410 | 410 | "history = stacked_ae.fit(X_train, X_train, epochs=20,\n",
411 |     | - " validation_data=[X_valid, X_valid])"
    | 411 | + " validation_data=(X_valid, X_valid))"
412 | 412 | ]
413 | 413 | },
414 | 414 | {

655 | 655 | "tied_ae.compile(loss=\"binary_crossentropy\",\n",
656 | 656 | " optimizer=keras.optimizers.SGD(lr=1.5), metrics=[rounded_accuracy])\n",
657 | 657 | "history = tied_ae.fit(X_train, X_train, epochs=10,\n",
658 |     | - " validation_data=[X_valid, X_valid])"
    | 658 | + " validation_data=(X_valid, X_valid))"
659 | 659 | ]
660 | 660 | },
661 | 661 | {

708 | 708 | " autoencoder = keras.models.Sequential([encoder, decoder])\n",
709 | 709 | " autoencoder.compile(optimizer, loss, metrics=metrics)\n",
710 | 710 | " autoencoder.fit(X_train, X_train, epochs=n_epochs,\n",
711 |     | - " validation_data=[X_valid, X_valid])\n",
    | 711 | + " validation_data=(X_valid, X_valid))\n",
712 | 712 | " return encoder, decoder, encoder(X_train), encoder(X_valid)"
713 | 713 | ]
714 | 714 | },

855 | 855 | "stacked_ae_1_by_1.compile(loss=\"binary_crossentropy\",\n",
856 | 856 | " optimizer=keras.optimizers.SGD(lr=0.1), metrics=[rounded_accuracy])\n",
857 | 857 | "history = stacked_ae_1_by_1.fit(X_train, X_train, epochs=10,\n",
858 |     | - " validation_data=[X_valid, X_valid])"
    | 858 | + " validation_data=(X_valid, X_valid))"
859 | 859 | ]
860 | 860 | },
861 | 861 | {

943 | 943 | "conv_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
944 | 944 | " metrics=[rounded_accuracy])\n",
945 | 945 | "history = conv_ae.fit(X_train, X_train, epochs=5,\n",
946 |     | - " validation_data=[X_valid, X_valid])"
    | 946 | + " validation_data=(X_valid, X_valid))"
947 | 947 | ]
948 | 948 | },
949 | 949 | {

1095 | 1095 | }
1096 | 1096 | ],
1097 | 1097 | "source": [
1098 |      | - "history = recurrent_ae.fit(X_train, X_train, epochs=10, validation_data=[X_valid, X_valid])"
     | 1098 | + "history = recurrent_ae.fit(X_train, X_train, epochs=10, validation_data=(X_valid, X_valid))"
1099 | 1099 | ]
1100 | 1100 | },
1101 | 1101 | {

1187 | 1187 | "denoising_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
1188 | 1188 | " metrics=[rounded_accuracy])\n",
1189 | 1189 | "history = denoising_ae.fit(X_train, X_train, epochs=10,\n",
1190 |      | - " validation_data=[X_valid, X_valid])"
     | 1190 | + " validation_data=(X_valid, X_valid))"
1191 | 1191 | ]
1192 | 1192 | },
1193 | 1193 | {

1276 | 1276 | "dropout_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
1277 | 1277 | " metrics=[rounded_accuracy])\n",
1278 | 1278 | "history = dropout_ae.fit(X_train, X_train, epochs=10,\n",
1279 |      | - " validation_data=[X_valid, X_valid])"
     | 1279 | + " validation_data=(X_valid, X_valid))"
1280 | 1280 | ]
1281 | 1281 | },
1282 | 1282 | {

1378 | 1378 | "simple_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.),\n",
1379 | 1379 | " metrics=[rounded_accuracy])\n",
1380 | 1380 | "history = simple_ae.fit(X_train, X_train, epochs=10,\n",
1381 |      | - " validation_data=[X_valid, X_valid])"
     | 1381 | + " validation_data=(X_valid, X_valid))"
1382 | 1382 | ]
1383 | 1383 | },
1384 | 1384 | {

1545 | 1545 | "sparse_l1_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
1546 | 1546 | " metrics=[rounded_accuracy])\n",
1547 | 1547 | "history = sparse_l1_ae.fit(X_train, X_train, epochs=10,\n",
1548 |      | - " validation_data=[X_valid, X_valid])"
     | 1548 | + " validation_data=(X_valid, X_valid))"
1549 | 1549 | ]
1550 | 1550 | },
1551 | 1551 | {

1715 | 1715 | "sparse_kl_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
1716 | 1716 | " metrics=[rounded_accuracy])\n",
1717 | 1717 | "history = sparse_kl_ae.fit(X_train, X_train, epochs=10,\n",
1718 |      | - " validation_data=[X_valid, X_valid])"
     | 1718 | + " validation_data=(X_valid, X_valid))"
1719 | 1719 | ]
1720 | 1720 | },
1721 | 1721 | {

1886 | 1886 | "variational_ae.add_loss(K.mean(latent_loss) / 784.)\n",
1887 | 1887 | "variational_ae.compile(loss=\"binary_crossentropy\", optimizer=\"rmsprop\", metrics=[rounded_accuracy])\n",
1888 | 1888 | "history = variational_ae.fit(X_train, X_train, epochs=25, batch_size=128,\n",
1889 |      | - " validation_data=[X_valid, X_valid])"
     | 1889 | + " validation_data=(X_valid, X_valid))"
1890 | 1890 | ]
1891 | 1891 | },
1892 | 1892 | {

4304 | 4304 | "])\n",
4305 | 4305 | "classifier.compile(loss=\"sparse_categorical_crossentropy\", optimizer=keras.optimizers.SGD(lr=0.02),\n",
4306 | 4306 | " metrics=[\"accuracy\"])\n",
4307 |      | - "history = classifier.fit(X_train_small, y_train_small, epochs=20, validation_data=[X_valid, y_valid])"
     | 4307 | + "history = classifier.fit(X_train_small, y_train_small, epochs=20, validation_data=(X_valid, y_valid))"
4308 | 4308 | ]
4309 | 4309 | },
4310 | 4310 | {

4429 | 4429 | " optimizer=keras.optimizers.SGD(lr=0.02),\n",
4430 | 4430 | " metrics=[\"accuracy\"])\n",
4431 | 4431 | "history = pretrained_clf.fit(X_train_small, y_train_small, epochs=30,\n",
4432 |      | - " validation_data=[X_valid, y_valid])"
     | 4432 | + " validation_data=(X_valid, y_valid))"
4433 | 4433 | ]
4434 | 4434 | },
4435 | 4435 | {

4493 | 4493 | " optimizer=keras.optimizers.SGD(lr=0.02),\n",
4494 | 4494 | " metrics=[\"accuracy\"])\n",
4495 | 4495 | "history = pretrained_clf.fit(X_train_small, y_train_small, epochs=20,\n",
4496 |      | - " validation_data=[X_valid, y_valid])"
     | 4496 | + " validation_data=(X_valid, y_valid))"
4497 | 4497 | ]
4498 | 4498 | },
4499 | 4499 | {

4555 | 4555 | "hashing_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n",
4556 | 4556 | " metrics=[rounded_accuracy])\n",
4557 | 4557 | "history = hashing_ae.fit(X_train, X_train, epochs=10,\n",
4558 |      | - " validation_data=[X_valid, X_valid])"
     | 4558 | + " validation_data=(X_valid, X_valid))"
4559 | 4559 | ]
4560 | 4560 | },
4561 | 4561 | {
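Note: every hunk above makes the same change, the validation_data argument of fit() goes from a list to a tuple. Keras documents validation_data as a tuple (x_val, y_val), and newer tf.keras releases can misread a list as a single multi-input x with no targets. A minimal sketch of the corrected call pattern, assuming the X_train/X_valid arrays and a compiled autoencoder such as stacked_ae defined elsewhere in the notebook:

    # Minimal sketch (assumption: X_train, X_valid and a compiled stacked_ae
    # come from the surrounding notebook). A tuple is the documented form for
    # validation_data; a list can be misinterpreted by recent tf.keras versions.
    history = stacked_ae.fit(X_train, X_train, epochs=20,
                             validation_data=(X_valid, X_valid))  # tuple, not list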