diff --git a/data/linear_regression.zip b/data/linear_regression.zip
new file mode 100644
index 00000000..2d17085f
Binary files /dev/null and b/data/linear_regression.zip differ
diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
index 1aa5f589..343354b5 100644
--- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
+++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
@@ -193,7 +193,9 @@ namespace Tensorflow
             }
 
             // Add Op to graph
-            var op = g.create_op(op_type_name, inputs.ToArray(), output_types.ToArray(),
+            var op = g.create_op(op_type_name,
+                inputs.ToArray(),
+                output_types.ToArray(),
                 name: scope,
                 input_types: input_types.ToArray(),
                 attrs: attr_protos,
diff --git a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
index 5b1f07e2..06a1fb49 100644
--- a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
+++ b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
@@ -61,7 +61,7 @@ namespace Tensorflow
             bool sharded = false,
             int max_to_keep = 5,
             float keep_checkpoint_every_n_hours = 10000,
-            string name = "",
+            string name = null,
             bool restore_sequentially = false,
             string filename = "model",
             bool build_save = true,
diff --git a/src/TensorFlowNET.Core/Train/Saving/Saver.cs b/src/TensorFlowNET.Core/Train/Saving/Saver.cs
index cb95628d..318a318c 100644
--- a/src/TensorFlowNET.Core/Train/Saving/Saver.cs
+++ b/src/TensorFlowNET.Core/Train/Saving/Saver.cs
@@ -37,7 +37,7 @@ namespace Tensorflow
             bool sharded = false,
             int max_to_keep = 5,
             float keep_checkpoint_every_n_hours = 10000,
-            string name = "",
+            string name = null,
             bool restore_sequentially = false,
             SaverDef saver_def = null,
             ISaverBuilder builder = null,
diff --git a/src/TensorFlowNET.Core/ops.name_scope.cs b/src/TensorFlowNET.Core/ops.name_scope.cs
index cb6562e5..295ea89e 100644
--- a/src/TensorFlowNET.Core/ops.name_scope.cs
+++ b/src/TensorFlowNET.Core/ops.name_scope.cs
@@ -27,10 +27,7 @@ namespace Tensorflow
 
         public void __enter__()
         {
-            if (String.IsNullOrEmpty(_name))
-            {
-                _name = _default_name;
-            }
+            _name = _name == null ? _default_name : _name;
             Graph g = null;
 
             if (_values is List<Tensor> values)
diff --git a/test/TensorFlowNET.Examples/LinearRegression.cs b/test/TensorFlowNET.Examples/LinearRegression.cs
index 48713be9..5d02122a 100644
--- a/test/TensorFlowNET.Examples/LinearRegression.cs
+++ b/test/TensorFlowNET.Examples/LinearRegression.cs
@@ -57,7 +57,7 @@ namespace TensorFlowNET.Examples
             var grad = tf.train.GradientDescentOptimizer(learning_rate);
             var optimizer = grad.minimize(cost);*/
 
-            var new_saver = tf.train.import_meta_graph("save_model.meta", import_scope: "import");
+            var new_saver = tf.train.import_meta_graph("linear_regression.meta");
 
             var X = graph.OperationByName("Placeholder");
             var Y = graph.OperationByName("Placeholder_1");
diff --git a/test/TensorFlowNET.Examples/python/linear_regression.py b/test/TensorFlowNET.Examples/python/linear_regression.py
index eb9bfc87..f8e63cc9 100644
--- a/test/TensorFlowNET.Examples/python/linear_regression.py
+++ b/test/TensorFlowNET.Examples/python/linear_regression.py
@@ -14,7 +14,7 @@ rng = numpy.random
 # Parameters
 learning_rate = 0.01
 training_epochs = 1000
-display_step = 50
+display_step = 10
 
 # Training Data
 train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
@@ -23,28 +23,41 @@ train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                          2.827,3.465,1.65,2.904,2.42,2.94,1.3])
 n_samples = train_X.shape[0]
 
-# tf Graph Input
-X = tf.placeholder("float")
-Y = tf.placeholder("float")
-
-# Set model weights
-W = tf.Variable(rng.randn(), name="weight")
-b = tf.Variable(rng.randn(), name="bias")
-
-# Construct a linear model
-mul = tf.multiply(X, W)
-pred = tf.add(mul, b)
-
-# Mean squared error
-sub = pred-Y
-pow = tf.pow(sub, 2)
-
-reduce = tf.reduce_sum(pow)
-cost = reduce/(2*n_samples)
-# Gradient descent
-# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
-grad = tf.train.GradientDescentOptimizer(learning_rate)
-optimizer = grad.minimize(cost)
+if False:
+    # tf Graph Input
+    X = tf.placeholder("float")
+    Y = tf.placeholder("float")
+
+    # Set model weights
+    W = tf.Variable(-0.06, name="weight")
+    b = tf.Variable(-0.73, name="bias")
+
+    # Construct a linear model
+    mul = tf.multiply(X, W)
+    pred = tf.add(mul, b)
+
+    # Mean squared error
+    sub = pred-Y
+    pow = tf.pow(sub, 2)
+
+    reduce = tf.reduce_sum(pow)
+    cost = reduce/(2*n_samples)
+    # Gradient descent
+    # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
+    grad = tf.train.GradientDescentOptimizer(learning_rate)
+    optimizer = grad.minimize(cost)
+    # tf.train.export_meta_graph(filename='save_model.meta');
+else:
+    # tf Graph Input
+    new_saver = tf.train.import_meta_graph("save_model.meta")
+    nodes = tf.get_default_graph()._nodes_by_name
+    optimizer = nodes["GradientDescent"]
+    cost = nodes["truediv"].outputs[0]
+    X = nodes["Placeholder"].outputs[0]
+    Y = nodes["Placeholder_1"].outputs[0]
+    W = nodes["weight"].outputs[0]
+    b = nodes["bias"].outputs[0]
+    pred = nodes["Add"].outputs[0]
 
 # Initialize the variables (i.e. assign their default value)
 init = tf.global_variables_initializer()
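
The `string name = null` defaults in Saver.cs and BaseSaverBuilder.cs work together with the rewritten `ops.name_scope.__enter__` above: the old `String.IsNullOrEmpty` check made an explicitly empty name fall back to `_default_name` just like a missing one, while the new ternary falls back only on `null`, so an explicit `""` now passes through. A minimal standalone sketch of the before/after rule (the helper names are hypothetical, not library code):

```csharp
// Hypothetical helpers illustrating the two fallback rules;
// not part of TensorFlow.NET itself.
static string ResolveOld(string name, string defaultName) =>
    string.IsNullOrEmpty(name) ? defaultName : name; // "" and null both fall back

static string ResolveNew(string name, string defaultName) =>
    name == null ? defaultName : name;               // only null falls back

// ResolveOld("", "save") -> "save"
// ResolveNew("", "save") -> ""   (an empty name is now preserved)
// Both map null -> "save" and "ckpt" -> "ckpt".
```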
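The C# example and the Python `else:` branch mirror each other: both restore the graph from a `.meta` file, then resolve operations by the node names recorded when the Python script exported the graph (`Placeholder`, `Placeholder_1`, `weight`, `bias`, `Add`, `truediv`, `GradientDescent`). A sketch of that lookup on the C# side, assuming `linear_regression.meta` (presumably packed in data/linear_regression.zip) is on disk and that `tf.get_default_graph()` is exposed by the binding the same way it is in Python:

```csharp
// Sketch only: the node names must match those baked in at export time by
// test/TensorFlowNET.Examples/python/linear_regression.py.
var graph = tf.get_default_graph();
var new_saver = tf.train.import_meta_graph("linear_regression.meta");

var X = graph.OperationByName("Placeholder");    // input feature
var Y = graph.OperationByName("Placeholder_1");  // target value
var pred = graph.OperationByName("Add");         // X * W + b
var cost = graph.OperationByName("truediv");     // sum((pred - Y)^2) / (2 * n_samples)
var optimizer = graph.OperationByName("GradientDescent");
```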