diff --git a/data/imdb.zip b/data/imdb.zip
new file mode 100644
index 0000000..e568354
Binary files /dev/null and b/data/imdb.zip differ
diff --git a/src/TensorFlowNET.Examples/ImageProcessing/ImageClassificationKeras.cs b/src/TensorFlowNET.Examples/ImageProcessing/ImageClassificationKeras.cs
index f88f574..76dbf1a 100644
--- a/src/TensorFlowNET.Examples/ImageProcessing/ImageClassificationKeras.cs
+++ b/src/TensorFlowNET.Examples/ImageProcessing/ImageClassificationKeras.cs
@@ -76,7 +76,7 @@ public override void PrepareData()
         {
             string fileName = "flower_photos.tgz";
             string url = $"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz";
-            string data_dir = Path.GetTempPath();
+            string data_dir = Path.Combine(Path.GetTempPath(), "flower_photos");
             Web.Download(url, data_dir, fileName);
             Compress.ExtractTGZ(Path.Join(data_dir, fileName), data_dir);
             data_dir = Path.Combine(data_dir, "flower_photos");
@@ -90,11 +90,11 @@ public override void PrepareData()
                 batch_size: batch_size);

             val_ds = keras.preprocessing.image_dataset_from_directory(data_dir,
-                                            validation_split: 0.2f,
-                                            subset: "validation",
-                                            seed: 123,
-                                            image_size: img_dim,
-                                            batch_size: batch_size);
+                validation_split: 0.2f,
+                subset: "validation",
+                seed: 123,
+                image_size: img_dim,
+                batch_size: batch_size);

             train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size: -1);
             val_ds = val_ds.cache().prefetch(buffer_size: -1);
@@ -102,13 +102,7 @@ public override void PrepareData()
             foreach (var (img, label) in train_ds)
             {
                 print($"images: {img.TensorShape}");
-                var nd = label.numpy();
-                print($"labels: {nd}");
-                var data = nd.Data();
-                if (data.Max() > 4 || data.Min() < 0)
-                {
-                    // exception
-                }
+                print($"labels: {label.numpy()}");
             }
         }
     }
diff --git a/src/TensorFlowNET.Examples/ImageProcessing/ToyResNet.cs b/src/TensorFlowNET.Examples/ImageProcessing/ToyResNet.cs
index 57dfdb1..847cb6a 100644
--- a/src/TensorFlowNET.Examples/ImageProcessing/ToyResNet.cs
+++ b/src/TensorFlowNET.Examples/ImageProcessing/ToyResNet.cs
@@ -43,7 +43,7 @@ public ExampleConfig InitConfig()
         public bool Run()
         {
             tf.enable_eager_execution();
-
+            
             BuildModel();
             PrepareData();
             Train();
diff --git a/src/TensorFlowNET.Examples/Program.cs b/src/TensorFlowNET.Examples/Program.cs
index af195cf..7d2771d 100644
--- a/src/TensorFlowNET.Examples/Program.cs
+++ b/src/TensorFlowNET.Examples/Program.cs
@@ -22,6 +22,7 @@ limitations under the License.
 using System.Reflection;
 using Tensorflow;
 using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
 using Console = Colorful.Console;

 namespace TensorFlowNET.Examples
@@ -100,6 +101,7 @@ private static void RunExamples(string key, IExample[] examples)
                 }

                 finished++;
+                keras.backend.clear_session();
                 Console.WriteLine($"{DateTime.UtcNow} Completed {example.Config.Name}", Color.White);

             }
diff --git a/src/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj b/src/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
index 5091e8d..61b0061 100644
--- a/src/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
+++ b/src/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
@@ -44,6 +44,7 @@
+
diff --git a/src/TensorFlowNET.Examples/TextProcessing/CnnTextClassificationKeras.cs b/src/TensorFlowNET.Examples/TextProcessing/CnnTextClassificationKeras.cs
new file mode 100644
index 0000000..1b01355
--- /dev/null
+++ b/src/TensorFlowNET.Examples/TextProcessing/CnnTextClassificationKeras.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using Tensorflow.Keras.Utils;
+using static Tensorflow.KerasApi;
+
+namespace TensorFlowNET.Examples
+{
+    /// <summary>
+    /// https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/text_classification_from_scratch.ipynb#scrollTo=qqTCrB7SmJv9
+    /// </summary>
+    public class CnnTextClassificationKeras : SciSharpExample, IExample
+    {
+        public ExampleConfig InitConfig()
+            => Config = new ExampleConfig
+            {
+                Name = "CNN Text Classification (Keras)",
+                Enabled = false
+            };
+
+        public bool Run()
+        {
+            return true;
+        }
+
+        public override void PrepareData()
+        {
+            string fileName = "aclImdb_v1.tar.gz";
+            string url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"; // IMDB reviews (aclImdb_v1) archive
+            string data_dir = Path.GetTempPath();
+            Web.Download(url, data_dir, fileName);
+            Compress.ExtractGZip(Path.Join(data_dir, fileName), data_dir);
+            data_dir = Path.Combine(data_dir, "aclImdb_v1");
+        }
+    }
+}
diff --git a/src/tensorflow2.x-python-tutorial/.vscode/launch.json b/src/tensorflow2.x-python-tutorial/.vscode/launch.json
index 83d1041..f5fd933 100644
--- a/src/tensorflow2.x-python-tutorial/.vscode/launch.json
+++ b/src/tensorflow2.x-python-tutorial/.vscode/launch.json
@@ -8,7 +8,7 @@
             "name": "Python: Current File",
             "type": "python",
             "request": "launch",
-            "program": "${workspaceFolder}/image_classification.py",
+            "program": "${workspaceFolder}/keras_mnist.py",
             "console": "integratedTerminal",
             "cwd": "${workspaceFolder}",
             "justMyCode": false
diff --git a/src/tensorflow2.x-python-tutorial/keras-basic.py b/src/tensorflow2.x-python-tutorial/keras-basic.py
deleted file mode 100644
index b2250bd..0000000
--- a/src/tensorflow2.x-python-tutorial/keras-basic.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-
-inputs = keras.Input(shape=(784,))
-
-dense = layers.Dense(64, activation="relu")
-x = dense(inputs)
-
-dense = layers.Dense(64, activation="relu")
-x = dense(x)
-
-dense = layers.Dense(10)
-outputs = dense(x)
-
-model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
-model.summary();
-
-
-model = tf.keras.Sequential()
-layer = tf.keras.layers.Embedding(7, 2, input_length=4)
-model.add(layer)
-# The model will take as input an integer matrix of size (batch,
-# input_length), and the largest integer (i.e. word index) in the input
-# should be no larger than 999 (vocabulary size).
-# Now model.output_shape is (None, 10, 64), where `None` is the batch
-# dimension.
-model.compile('rmsprop', 'mse')
-
-input_array = np.array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
-# np.random.randint(4, size=(3, 4))
-output_array = model.predict(input_array)
-print(output_array.shape)
\ No newline at end of file
diff --git a/src/tensorflow2.x-python-tutorial/keras_basic.py b/src/tensorflow2.x-python-tutorial/keras_basic.py
deleted file mode 100644
index 791f29a..0000000
--- a/src/tensorflow2.x-python-tutorial/keras_basic.py
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/1_Introduction/helloworld.ipynb
-
-import tensorflow as tf
-import numpy as np
-
-model = tf.keras.Sequential([
-    tf.keras.layers.Dense(5, input_shape=(3,)),
-    tf.keras.layers.Softmax()])
-model.save('/tmp/model')
-loaded_model = tf.keras.models.load_model('/tmp/model')
-x = tf.random.uniform((10, 3))
-assert np.allclose(model.predict(x), loaded_model.predict(x))
-
-tensor = [0, 1, 2, 3]
-mask = np.array([True, False, True, False])
-masked = tf.boolean_mask(tensor, mask)
-
-a = tf.constant(0.0);
-b = 2.0 * a;
-
-X = tf.placeholder(tf.double)
-W = tf.constant(1.0)
-mul = tf.multiply(X, W)
-
-ones = tf.zeros([300, 400], tf.int32)
-
-x = tf.Variable(10, name = "x");
-for i in range(0, 5):
-    x = x + 1;
-
-# Create a Tensor.
-hello = tf.constant("hello world")
-print(hello)
-
-# To access a Tensor value, call numpy().
-val = hello.numpy()
-
-print(val)
\ No newline at end of file
diff --git a/src/tensorflow2.x-python-tutorial/keras_mnist.py b/src/tensorflow2.x-python-tutorial/keras_mnist.py
new file mode 100644
index 0000000..a7abdb8
--- /dev/null
+++ b/src/tensorflow2.x-python-tutorial/keras_mnist.py
@@ -0,0 +1,51 @@
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+
+from tensorflow.python.platform import gfile
+# GRAPH_PB_PATH = 'D:/tmp/TensorflowIssue/TensorflowIssue/model/saved_model.pb'
+GRAPH_PB_PATH = 'D:/tmp/TensorFlow.NET/data/saved_model.pb'
+with tf.compat.v1.Session() as sess:
+    print("load graph")
+    with tf.io.gfile.GFile(GRAPH_PB_PATH,'rb') as f:
+        graph_def = tf.compat.v1.GraphDef()
+        graph_def.ParseFromString(f.read())
+        sess.graph.as_default()
+        tf.import_graph_def(graph_def, name='')
+        graph_nodes=[n for n in graph_def.node]
+        names = []
+        for t in graph_nodes:
+            names.append(t.name)
+        print(names)
+
+inputs = keras.Input(shape=(784,))
+
+dense = layers.Dense(64, activation="relu")
+x = dense(inputs)
+
+dense = layers.Dense(64, activation="relu")
+x = dense(x)
+
+dense = layers.Dense(10)
+outputs = dense(x)
+
+model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
+model.summary();
+
+(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
+
+x_train = x_train.reshape(60000, 784).astype("float32") / 255
+x_test = x_test.reshape(10000, 784).astype("float32") / 255
+
+model.compile(
+    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+    optimizer=keras.optimizers.RMSprop(),
+    metrics=["accuracy"],
+)
+
+history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2)
+
+test_scores = model.evaluate(x_test, y_test, verbose=2)
+print("Test loss:", test_scores[0])
+print("Test accuracy:", test_scores[1])
\ No newline at end of file
diff --git a/src/tensorflow2.x-python-tutorial/transformer.py b/src/tensorflow2.x-python-tutorial/transformer.py
new file mode 100644
index 0000000..3a597b6
--- /dev/null
+++ b/src/tensorflow2.x-python-tutorial/transformer.py
@@ -0,0 +1,24 @@
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+
+physical_devices = tf.config.list_physical_devices('CPU')
+tf.config.experimental.set_memory_growth(physical_devices[0], True)
+
+tf.config.run_functions_eagerly(True)
+tf.debugging.set_log_device_placement(True)
+
+a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
+print(a)
+b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
+print(b)
+c = tf.matmul(a, b)
+print(c)
+
+vocab_size = 20000  # Only consider the top 20k words
+maxlen = 200  # Only consider the first 200 words of each movie review
+(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
+print(len(x_train), "Training sequences")
+print(len(x_val), "Validation sequences")
+x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
+x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
\ No newline at end of file
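
Note: transformer.py stops right after padding the IMDB sequences and does not build a model. The sketch below shows one way the script could continue, loosely following the upstream Keras "Text classification with Transformer" tutorial it appears to track. It is not part of the commit: it reuses maxlen, vocab_size, x_train, y_train, x_val and y_val from the script above, assumes TF >= 2.4 (for layers.MultiHeadAttention), and the embed_dim / num_heads / ff_dim values are only illustrative.

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers


class TransformerBlock(layers.Layer):
    """Self-attention followed by a position-wise feed-forward block, each with residual + layer norm."""

    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=False):
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)


class TokenAndPositionEmbedding(layers.Layer):
    """Token embedding plus a learned embedding of each position index."""

    def __init__(self, maxlen, vocab_size, embed_dim):
        super().__init__()
        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)

    def call(self, x):
        seq_len = tf.shape(x)[-1]
        positions = tf.range(start=0, limit=seq_len, delta=1)
        return self.token_emb(x) + self.pos_emb(positions)


embed_dim = 32  # embedding size per token (illustrative)
num_heads = 2   # number of attention heads (illustrative)
ff_dim = 32     # hidden size of the feed-forward block (illustrative)

inputs = layers.Input(shape=(maxlen,))
x = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)(inputs)
x = TransformerBlock(embed_dim, num_heads, ff_dim)(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(20, activation="relu")(x)
x = layers.Dropout(0.1)(x)
outputs = layers.Dense(2, activation="softmax")(x)

model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=32, epochs=2,
          validation_data=(x_val, y_val))

GlobalAveragePooling1D collapses the per-token transformer outputs into one vector per review before the classification head, which keeps the head independent of the padded sequence length.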