diff --git a/src/TensorFlowNET.Core/APIs/tf.init.cs b/src/TensorFlowNET.Core/APIs/tf.init.cs
index fb153301..c4cc415e 100644
--- a/src/TensorFlowNET.Core/APIs/tf.init.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.init.cs
@@ -15,10 +15,12 @@ namespace Tensorflow
public static variable_scope variable_scope(string name,
string default_name = null,
Tensor[] values = null,
- bool auxiliary_name_scope = true) => new variable_scope(name,
- default_name,
+ bool? reuse = null,
+ bool auxiliary_name_scope = true) => new variable_scope(name,
+ default_name,
values,
- auxiliary_name_scope);
+ reuse: reuse,
+ auxiliary_name_scope: auxiliary_name_scope);
public static variable_scope variable_scope(VariableScope scope,
string default_name = null,
@@ -27,7 +29,8 @@ namespace Tensorflow
bool auxiliary_name_scope = true) => new variable_scope(scope,
default_name,
values,
- auxiliary_name_scope);
+ reuse: reuse,
+ auxiliary_name_scope: auxiliary_name_scope);
public static IInitializer truncated_normal_initializer(float mean = 0.0f,
float stddev = 1.0f,
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
index 9c89aadf..7f7f75a2 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
@@ -108,11 +108,11 @@ namespace Tensorflow.Keras.Layers
// Build layer if applicable (if the `build` method has been
// overridden).
_maybe_build(inputs[0]);
- });
- outputs = call(inputs[0], training: training);
- _handle_activity_regularization(inputs[0], outputs);
- _set_mask_metadata(inputs[0], outputs, null);
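+                    // keep the forward pass and its bookkeeping inside the enclosing scope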
+ outputs = call(inputs[0], training: training);
+ _handle_activity_regularization(inputs[0], outputs);
+ _set_mask_metadata(inputs[0], outputs, null);
+ });
}
return outputs;
diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs
index 6ed47fe8..e8844d46 100644
--- a/src/TensorFlowNET.Core/Layers/Layer.cs
+++ b/src/TensorFlowNET.Core/Layers/Layer.cs
@@ -48,6 +48,7 @@ namespace Tensorflow.Layers
else
{
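+                // pass the layer's _reuse flag through to its variable scope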
scope_context_manager = tf.variable_scope(_scope,
+ reuse: _reuse,
auxiliary_name_scope: false);
}
@@ -123,34 +124,33 @@ namespace Tensorflow.Layers
_set_scope();
var reuse = built || (_reuse != null && _reuse.Value);
- return Python.with(tf.variable_scope(_scope,
- reuse: reuse,
+ return with(tf.variable_scope(_scope,
+ reuse: reuse,
auxiliary_name_scope: false), scope =>
- {
- _current_scope = scope;
- return Python.with(ops.name_scope(_name_scope()), delegate
{
- var variable = base.add_weight(name,
- shape,
- dtype: dtype,
- initializer: initializer,
- trainable: trainable,
- getter: (name1, shape1, dtype1, initializer1, trainable1) =>
- {
- return tf.get_variable(name1,
- shape: new TensorShape(shape1),
- dtype: dtype1,
- initializer: initializer1,
- trainable: trainable1);
- });
-
- if(init_graph != null)
+ _current_scope = scope;
+ return with(ops.name_scope(_name_scope()), delegate
{
- var trainable_variables = variables.trainable_variables();
- }
- return variable;
+ var variable = base.add_weight(name,
+ shape,
+ dtype: dtype,
+ initializer: initializer,
+ trainable: trainable,
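+                        // custom getter that creates the variable via tf.get_variable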
+ getter: (name1, shape1, dtype1, initializer1, trainable1) =>
+ {
+ return tf.get_variable(name1,
+ shape: new TensorShape(shape1),
+ dtype: dtype1,
+ initializer: initializer1,
+ trainable: trainable1);
+ });
+
+ //if (init_graph != null)
+ //var trainable_variables = variables.trainable_variables();
+
+ return variable;
+ });
});
- });
}
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
index 94833236..37434e43 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
@@ -5,6 +5,9 @@ using static Tensorflow.Python;
namespace Tensorflow.Operations
{
+ ///
+ /// Performs the max pooling on the input.
+ ///
public class MaxPoolFunction : IPoolFunction
{
public Tensor Apply(Tensor value,
@@ -14,8 +17,9 @@ namespace Tensorflow.Operations
string data_format = "NHWC",
string name = null)
{
- return with(ops.name_scope(name, "MaxPool", new { value }), scope => {
-
+ return with(ops.name_scope(name, "MaxPool", value), scope =>
+ {
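+                // adopt the name produced by the name scope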
+ name = scope;
value = ops.convert_to_tensor(value, name: "input");
return gen_nn_ops.max_pool(
value,
diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
index 02f75c84..bafa254f 100644
--- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
+++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
@@ -5,7 +5,7 @@
TensorFlow.NET
Tensorflow
1.14.0
- 0.8.0
+ 0.8.1
Haiping Chen
SciSharp STACK
true
@@ -17,15 +17,12 @@
TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#
Google's TensorFlow full binding in .NET Standard.
Docs: https://tensorflownet.readthedocs.io
- 0.8.0.0
- Changes since v0.7:
+ 0.8.1.0
+ Changes since v0.8:
-Add XOR example.
-Add KMeans example.
-Add Object Detection example.
-Add Word2Vec example.
+Removed global static graph instance.
7.2
- 0.8.0.0
+ 0.8.1.0
diff --git a/src/TensorFlowNET.Core/Variables/variable_scope.py.cs b/src/TensorFlowNET.Core/Variables/variable_scope.py.cs
index 4b66dd3b..09429915 100644
--- a/src/TensorFlowNET.Core/Variables/variable_scope.py.cs
+++ b/src/TensorFlowNET.Core/Variables/variable_scope.py.cs
@@ -107,7 +107,7 @@ namespace Tensorflow
if (_name != null || _scope != null)
{
var name_scope = _name == null ? _scope.name.Split('/').Last() : _name;
- if (name_scope != null || current_name_scope != null)
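+                    // only open a new name scope when none is active yet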
+ if (current_name_scope == null)
current_name_scope = ops.name_scope(name_scope);
current_name_scope.__enter__();
var current_name_scope_name = current_name_scope;
diff --git a/test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs b/test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs
index bba2ed96..503e74f6 100644
--- a/test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs
+++ b/test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs
@@ -174,7 +174,8 @@ namespace TensorFlowNET.Examples
x_emb = tf.expand_dims(x_emb, -1);
});
- for(int len = 0; len < filter_sizes.Rank; len++)
+            var pooled_outputs = new List<Tensor>();
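+            // collect the pooled output of each filter size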
+ for (int len = 0; len < filter_sizes.Rank; len++)
{
int filter_size = filter_sizes.GetLength(len);
var conv = tf.layers.conv2d(
@@ -190,8 +191,11 @@ namespace TensorFlowNET.Examples
pool_size: new[] { document_max_len - filter_size + 1, 1 },
strides: new[] { 1, 1 },
padding: "VALID");
+
+ pooled_outputs.Add(pool);
}
+ // var h_pool = tf.concat(pooled_outputs, 3);
return graph;
}