diff --git a/test/TensorFlowNET.Examples/NeuralNetXor.cs b/test/TensorFlowNET.Examples/NeuralNetXor.cs
index 380d77ff..a52fa3dd 100644
--- a/test/TensorFlowNET.Examples/NeuralNetXor.cs
+++ b/test/TensorFlowNET.Examples/NeuralNetXor.cs
@@ -1,99 +1,98 @@
-using System;
-using System.Collections.Generic;
-using System.Text;
-using NumSharp;
-using Tensorflow;
-
-namespace TensorFlowNET.Examples
-{
- ///
- /// Simple vanilla neural net solving the famous XOR problem
- /// https://github.com/amygdala/tensorflow-workshop/blob/master/workshop_sections/getting_started/xor/README.md
- ///
- public class NeuralNetXor : Python, IExample
- {
- public int Priority => 2;
- public bool Enabled { get; set; } = true;
- public string Name => "NN XOR";
-
- public int num_steps = 5000;
-
- private NDArray data;
-
- private (Operation, Tensor, RefVariable) make_graph(Tensor features,Tensor labels, int num_hidden = 8)
- {
+using System;
+using System.Collections.Generic;
+using System.Text;
+using NumSharp;
+using Tensorflow;
+
+namespace TensorFlowNET.Examples
+{
+ ///
+ /// Simple vanilla neural net solving the famous XOR problem
+ /// https://github.com/amygdala/tensorflow-workshop/blob/master/workshop_sections/getting_started/xor/README.md
+ ///
+ public class NeuralNetXor : Python, IExample
+ {
+ public int Priority => 2;
+ public bool Enabled { get; set; } = true;
+ public string Name => "NN XOR";
+
+ public int num_steps = 5000;
+
+ private NDArray data;
+
+ private (Operation, Tensor, RefVariable) make_graph(Tensor features,Tensor labels, int num_hidden = 8)
+ {
var stddev = 1 / Math.Sqrt(2);
- var hidden_weights = tf.Variable(tf.truncated_normal(new int []{2, num_hidden}, stddev: (float) stddev ));
-
- // Shape [4, num_hidden]
- var hidden_activations = tf.nn.relu(tf.matmul(features, hidden_weights));
-
- var output_weights = tf.Variable(tf.truncated_normal(
- new[] {num_hidden, 1},
- stddev: (float) (1 / Math.Sqrt(num_hidden))
- ));
-
- // Shape [4, 1]
- var logits = tf.matmul(hidden_activations, output_weights);
-
- // Shape [4]
- var predictions = tf.sigmoid(tf.squeeze(logits));
- var loss = tf.reduce_mean(tf.square(predictions - tf.cast(labels, tf.float32)));
-
- var gs = tf.Variable(0, trainable: false);
- var train_op = tf.train.GradientDescentOptimizer(0.2f).minimize(loss, global_step: gs);
-
- return (train_op, loss, gs);
- }
-
- public bool Run()
- {
- PrepareData();
-
+ var hidden_weights = tf.Variable(tf.truncated_normal(new int []{2, num_hidden}, stddev: (float) stddev ));
+
+ // Shape [4, num_hidden]
+ var hidden_activations = tf.nn.relu(tf.matmul(features, hidden_weights));
+
+ var output_weights = tf.Variable(tf.truncated_normal(
+ new[] {num_hidden, 1},
+ stddev: (float) (1 / Math.Sqrt(num_hidden))
+ ));
+
+ // Shape [4, 1]
+ var logits = tf.matmul(hidden_activations, output_weights);
+
+ // Shape [4]
+ var predictions = tf.sigmoid(tf.squeeze(logits));
+ var loss = tf.reduce_mean(tf.square(predictions - tf.cast(labels, tf.float32)));
+
+ var gs = tf.Variable(0, trainable: false);
+ var train_op = tf.train.GradientDescentOptimizer(0.2f).minimize(loss, global_step: gs);
+
+ return (train_op, loss, gs);
+ }
+
+ public bool Run()
+ {
+ PrepareData();
+
var graph = tf.Graph().as_default();
var features = tf.placeholder(tf.float32, new TensorShape(4, 2));
var labels = tf.placeholder(tf.int32, new TensorShape(4));
var (train_op, loss, gs) = make_graph(features, labels);
-
- var init = tf.global_variables_initializer();
-
- // Start tf session
- with(tf.Session(graph), sess =>
- {
- sess.run(init);
+
+ var init = tf.global_variables_initializer();
+
+ float loss_value = 0;
+ // Start tf session
+ with(tf.Session(graph), sess =>
+ {
+ sess.run(init);
var step = 0;
- var y_ = np.array(new int[] { 1, 0, 0, 1 }, dtype: np.int32);
- float loss_value = 0;
- while (step < num_steps)
- {
- // original python:
- //_, step, loss_value = sess.run(
- // [train_op, gs, loss],
- // feed_dict={features: xy, labels: y_}
- // )
- loss_value = sess.run(loss, new FeedItem(features, data), new FeedItem(labels, y_));
- step++;
- if (step%1000==0)
- Console.WriteLine($"Step {step} loss: {loss_value}");
- }
- Console.WriteLine($"Final loss: {loss_value}");
- });
-
- return true;
- }
-
- public void PrepareData()
- {
+ var y_ = np.array(new int[] { 1, 0, 0, 1 }, dtype: np.int32);
+ while (step < num_steps)
+ {
+ // original python:
+ //_, step, loss_value = sess.run(
+ // [train_op, gs, loss],
+ // feed_dict={features: xy, labels: y_}
+ // )
+ loss_value = sess.run(loss, new FeedItem(features, data), new FeedItem(labels, y_));
+ step++;
+ if (step%1000==0)
+ Console.WriteLine($"Step {step} loss: {loss_value}");
+ }
+ Console.WriteLine($"Final loss: {loss_value}");
+ });
+ return loss_value < 0.0627;
+ }
+
+ public void PrepareData()
+ {
data = new float[,]
{
{1, 0 },
{1, 1 },
{0, 0 },
{0, 1 }
- };
- }
- }
-}
+ };
+ }
+ }
+}
diff --git a/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs b/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
index d58da040..df71b54e 100644
--- a/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
+++ b/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
@@ -109,12 +109,12 @@ namespace TensorFlowNET.ExamplesTests
new TextClassificationWithMovieReviews() { Enabled = true }.Run();
}
- //[Ignore("Attempting to use uninitialized value Variable_1")]
+ [Ignore("Loss function optimization is not working yet")]
[TestMethod]
public void NeuralNetXor()
{
tf.Graph().as_default();
- new NeuralNetXor() { Enabled = true }.Run();
+ Assert.IsTrue(new NeuralNetXor() { Enabled = true }.Run());
}