
Simplify the with() helper and its call sites.
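The change is mechanical: each touched class now derives from Python, and the generic arguments of the with helper are inferred by the compiler, so call sites drop the Python. qualifier and the explicit type parameters. For example, the call in array_ops.zeros (quoted from the diff below) changes from

    return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "zeros", shape), scope =>

to

    return with(new ops.name_scope(name, "zeros", shape), scope =>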

tags/v0.8.0
Oceania2018 6 years ago
commit 5a7d28c200
29 changed files with 79 additions and 72 deletions
  1. +2 -2 src/TensorFlowNET.Core/Framework/importer.py.cs
  2. +3 -3 src/TensorFlowNET.Core/Gradients/gradients_impl.py.cs
  3. +1 -1 src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs
  4. +2 -2 src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
  5. +7 -7 src/TensorFlowNET.Core/Operations/array_ops.py.cs
  6. +5 -5 src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs
  7. +6 -6 src/TensorFlowNET.Core/Operations/math_ops.py.cs
  8. +1 -1 src/TensorFlowNET.Core/Operations/nn_impl.py.cs
  9. +2 -2 src/TensorFlowNET.Core/Operations/random_ops.py.cs
  10. +4 -4 src/TensorFlowNET.Core/Python.cs
  11. +1 -1 src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
  12. +1 -1 src/TensorFlowNET.Core/Tensors/Tensor.cs
  13. +3 -3 src/TensorFlowNET.Core/Train/Optimizer.cs
  14. +2 -2 src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
  15. +1 -1 src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs
  16. +2 -2 src/TensorFlowNET.Core/Variables/RefVariable.cs
  17. +2 -2 src/TensorFlowNET.Core/Variables/VariableScope.cs
  18. +2 -2 src/TensorFlowNET.Core/ops.py.cs
  19. +3 -3 test/TensorFlowNET.Examples/ImageRecognition.cs
  20. +3 -3 test/TensorFlowNET.Examples/InceptionArchGoogLeNet.cs
  21. +1 -1 test/TensorFlowNET.Examples/LinearRegression.cs
  22. +1 -1 test/TensorFlowNET.Examples/MetaGraph.cs
  23. +1 -1 test/TensorFlowNET.UnitTest/CApiTest.cs
  24. +5 -5 test/TensorFlowNET.UnitTest/ConstantTest.cs
  25. +2 -2 test/TensorFlowNET.UnitTest/NameScopeTest.cs
  26. +2 -2 test/TensorFlowNET.UnitTest/PlaceholderTest.cs
  27. +1 -1 test/TensorFlowNET.UnitTest/SessionTest.cs
  28. +3 -3 test/TensorFlowNET.UnitTest/TrainSaverTest.cs
  29. +10 -3 test/TensorFlowNET.UnitTest/VariableTest.cs

+2 -2 src/TensorFlowNET.Core/Framework/importer.py.cs

@@ -7,7 +7,7 @@ using static Tensorflow.OpDef.Types;

namespace Tensorflow
{
- public class importer
+ public class importer : Python
{
public static ITensorOrOperation[] import_graph_def(GraphDef graph_def,
Dictionary<string, Tensor> input_map = null,
@@ -26,7 +26,7 @@ namespace Tensorflow

string prefix = "";
var graph = ops.get_default_graph();
- Python.with<ops.name_scope>(new ops.name_scope(name, "import", input_map.Values), scope =>
+ with(new ops.name_scope(name, "import", input_map.Values), scope =>
{
prefix = scope;
/*if (!string.IsNullOrEmpty(prefix))


+3 -3 src/TensorFlowNET.Core/Gradients/gradients_impl.py.cs

@@ -7,7 +7,7 @@ using System.Threading;

namespace Tensorflow
{
- public class gradients_impl
+ public class gradients_impl : Python
{
public static Tensor[] gradients(Tensor[] ys,
Tensor[] xs,
@@ -58,7 +58,7 @@ namespace Tensorflow
**/
var grads = new Dictionary<string, Tensor[][]>();

- Python.with<ops.name_scope>(new ops.name_scope(name, "gradients", values: all), scope =>
+ with(new ops.name_scope(name, "gradients", values: all), scope =>
{
string grad_scope = scope;
// Get a uid for this call to gradients that can be used to help
@@ -131,7 +131,7 @@ namespace Tensorflow
// for ops that do not have gradients.
var grad_fn = ops.get_gradient_function(op);

- Python.with<ops.name_scope>(new ops.name_scope(op.name + "_grad"), scope1 =>
+ with(new ops.name_scope(op.name + "_grad"), scope1 =>
{
string name1 = scope1;
if (grad_fn != null)


+1 -1 src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs

@@ -12,7 +12,7 @@ namespace Tensorflow
string scope = "",
string loss_collection= "losses")
{
- with<ops.name_scope>(new ops.name_scope(scope,
+ with(new ops.name_scope(scope,
"sparse_softmax_cross_entropy_loss",
(logits, labels, weights)),
namescope =>


+2 -2 src/TensorFlowNET.Core/Operations/OpDefLibrary.cs

@@ -10,7 +10,7 @@ using static Tensorflow.OpDef.Types;

namespace Tensorflow
{
- public class OpDefLibrary
+ public class OpDefLibrary : Python
{
public Operation _apply_op_helper(string op_type_name, string name = null, dynamic args = null)
{
@@ -44,7 +44,7 @@ namespace Tensorflow
var input_types = new List<TF_DataType>();
dynamic values = null;

- return Python.with<ops.name_scope, Operation>(new ops.name_scope(name), scope =>
+ return with(new ops.name_scope(name), scope =>
{
var inferred_from = new Dictionary<string, object>();
var base_types = new List<TF_DataType>();


+7 -7 src/TensorFlowNET.Core/Operations/array_ops.py.cs

@@ -5,14 +5,14 @@ using System.Text;
namespace Tensorflow
{
- public class array_ops
+ public class array_ops : Python
{
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name);
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "zeros", shape), scope =>
+ return with(new ops.name_scope(name, "zeros", shape), scope =>
{
name = scope;
switch (dtype)
@@ -68,7 +68,7 @@ namespace Tensorflow
private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true)
{
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "ones_like", new { tensor }), scope =>
+ return with(new ops.name_scope(name, "ones_like", new { tensor }), scope =>
{
name = scope;
var tensor1 = ops.convert_to_tensor(tensor, name: "tensor");
@@ -84,7 +84,7 @@ namespace Tensorflow
public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "ones", new { shape }), scope =>
+ return with(new ops.name_scope(name, "ones", new { shape }), scope =>
{
name = scope;
var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name);
@@ -130,7 +130,7 @@ namespace Tensorflow
private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32)
{
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Shape", new { input }), scope =>
+ return with(new ops.name_scope(name, "Shape", new { input }), scope =>
{
name = scope;
@@ -151,7 +151,7 @@ namespace Tensorflow
private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32)
{
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Size", new Tensor[] { input }), scope =>
+ return with(new ops.name_scope(name, "Size", new Tensor[] { input }), scope =>
{
name = scope;
@@ -182,7 +182,7 @@ namespace Tensorflow
public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
{
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope =>
+ return with(new ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope =>
{
name = scope;
tensor = ops.convert_to_tensor(tensor, name: "tensor");


+5 -5 src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs

@@ -9,7 +9,7 @@ namespace Tensorflow
{
public static Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation
{
- return with<ops.name_scope, Operation>(new ops.name_scope(name, "group_deps", inputs), scope =>
+ return with(new ops.name_scope(name, "group_deps", inputs), scope =>
{
name = scope;

@@ -39,7 +39,7 @@ namespace Tensorflow

private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null)
{
- return Python.with<_ControlDependenciesController, Operation>(ops.control_dependencies(deps), ctl =>
+ return with(ops.control_dependencies(deps), ctl =>
{
if (dev == null)
{
@@ -83,7 +83,7 @@ namespace Tensorflow

public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null)
{
- return Python.with<ops.name_scope, Tensor[]>(new ops.name_scope(name, "tuple", tensors), scope =>
+ return with(new ops.name_scope(name, "tuple", tensors), scope =>
{
name = scope;
var gating_ops = tensors.Select(x => x.op).ToList();
@@ -115,11 +115,11 @@ namespace Tensorflow
values.AddRange(dependencies);
values.Add(output_tensor);

- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "control_dependency", values), scope =>
+ return with(new ops.name_scope(name, "control_dependency", values), scope =>
{
name = scope;

- return Python.with<_ControlDependenciesController, Tensor>(ops.control_dependencies(dependencies), ctl =>
+ return with(ops.control_dependencies(dependencies), ctl =>
{
output_tensor = ops.convert_to_tensor_or_composite(output_tensor);
return _Identity(output_tensor, name: name);


+6 -6 src/TensorFlowNET.Core/Operations/math_ops.py.cs

@@ -14,7 +14,7 @@ namespace Tensorflow
if(base_type == x.dtype)
return x;

- return with<ops.name_scope, Tensor>(new ops.name_scope(name, "Cast", new { x }), scope =>
+ return with(new ops.name_scope(name, "Cast", new { x }), scope =>
{
x = ops.convert_to_tensor(x, name: "x");
if (x.dtype.as_base_dtype() != base_type)
@@ -141,7 +141,7 @@ namespace Tensorflow
if (delta == null)
delta = 1;

- return with<ops.name_scope, Tensor>(new ops.name_scope(name, "Range", new object[] { start, limit, delta }), scope =>
+ return with(new ops.name_scope(name, "Range", new object[] { start, limit, delta }), scope =>
{
name = scope;
var start1 = ops.convert_to_tensor(start, name: "start");
@@ -154,7 +154,7 @@ namespace Tensorflow

public static Tensor floordiv(Tensor x, Tensor y, string name = null)
{
- return with<ops.name_scope, Tensor>(new ops.name_scope(name, "floordiv", new { x, y }), scope =>
+ return with(new ops.name_scope(name, "floordiv", new { x, y }), scope =>
{
return gen_math_ops.floor_div(x, y, scope);
});
@@ -162,7 +162,7 @@ namespace Tensorflow

public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true)
{
- return with<ops.name_scope, Tensor>(new ops.name_scope(name, "Rank", new List<Tensor> { input }), scope =>
+ return with(new ops.name_scope(name, "Rank", new List<Tensor> { input }), scope =>
{
name = scope;
var input_tensor = ops.convert_to_tensor(input);
@@ -182,7 +182,7 @@ namespace Tensorflow
{
Tensor result = null;

- Python.with<ops.name_scope>(new ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope =>
+ with(new ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope =>
{
name = scope;

@@ -212,7 +212,7 @@ namespace Tensorflow
if (dt.is_floating() || dt.is_integer())
return x;

- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Conj", new List<Tensor> { x }), scope =>
+ return with(new ops.name_scope(name, "Conj", new List<Tensor> { x }), scope =>
{

return x;


+1 -1 src/TensorFlowNET.Core/Operations/nn_impl.py.cs

@@ -20,7 +20,7 @@ namespace Tensorflow
bool keep_dims = false)
{
Tuple<Tensor, Tensor> t = null;
- with<ops.name_scope>(new ops.name_scope(name, "moments", new { x, axes }), scope =>
+ with(new ops.name_scope(name, "moments", new { x, axes }), scope =>
{
// The dynamic range of fp16 is too limited to support the collection of
// sufficient statistics. As a workaround we simply perform the operations


+2 -2 src/TensorFlowNET.Core/Operations/random_ops.py.cs

@@ -23,7 +23,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "random_normal", new object[] { shape, mean, stddev }), scope =>
+ return with(new ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope =>
{
var shape_tensor = _ShapeTensor(shape);
var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean");
@@ -53,7 +53,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return with<ops.name_scope, Tensor>(new ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
+ return with(new ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
{
name = scope;
var tensorShape = _ShapeTensor(shape);


+4 -4 src/TensorFlowNET.Core/Python.cs

@@ -43,12 +43,12 @@ namespace Tensorflow
}
}

- public static void with<T>(IPython py, Action<T> action) where T : IPython
+ public static void with<T>(T py, Action<T> action) where T : IPython
{
try
{
py.__enter__();
- action((T)py);
+ action(py);
}
catch (Exception ex)
{
@@ -62,12 +62,12 @@ namespace Tensorflow
}
}

- public static TOut with<TIn, TOut>(IPython py, Func<TIn, TOut> action) where TIn : IPython
+ public static TOut with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython
{
try
{
py.__enter__();
- return action((TIn)py);
+ return action(py);
}
catch (Exception ex)
{
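For reference, here is a minimal, self-contained sketch of the Python.cs helpers after this change. Only the two with signatures and the __enter__ call are visible in the hunk above; the IPython members and the cleanup path are assumptions filled in for completeness.

using System;

namespace Tensorflow
{
    // Assumed shape of the context interface; only __enter__ appears in the hunk above.
    public interface IPython
    {
        void __enter__();
        void __exit__();
    }

    public class Python
    {
        // Taking the context object as T (rather than as IPython) lets the compiler infer T,
        // so a derived class can write: with(new ops.name_scope(name), scope => ...).
        public static void with<T>(T py, Action<T> action) where T : IPython
        {
            try
            {
                py.__enter__();
                action(py);
            }
            finally
            {
                py.__exit__(); // assumed cleanup; the hunk truncates the catch/finally blocks
            }
        }

        // Same idea for bodies that return a value; TIn and TOut are both inferred.
        public static TOut with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython
        {
            try
            {
                py.__enter__();
                return action(py);
            }
            finally
            {
                py.__exit__(); // assumed cleanup
            }
        }
    }
}

Because Tensor, ops, array_ops, Optimizer and the test fixtures now inherit from Python, they call these helpers unqualified, and the overload and its type arguments are resolved from the ops.name_scope (or Session/Graph) argument and the lambda itself.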


+1 -1 src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs

@@ -42,7 +42,7 @@ namespace Tensorflow
dtype = tr.dtype.as_base_dtype();
var namescope = new ops.name_scope(null, name, new { x, y });
- return Python.with<ops.name_scope, Tensor>(namescope, scope =>
+ return with(namescope, scope =>
{
Tensor result = null;
var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x");


+1 -1 src/TensorFlowNET.Core/Tensors/Tensor.cs

@@ -12,7 +12,7 @@ namespace Tensorflow
/// A tensor is a generalization of vectors and matrices to potentially higher dimensions.
/// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes.
/// </summary>
- public partial class Tensor : IDisposable, ITensorOrOperation
+ public partial class Tensor : Python, IDisposable, ITensorOrOperation
{
private readonly IntPtr _handle;



+3 -3 src/TensorFlowNET.Core/Train/Optimizer.cs

@@ -12,7 +12,7 @@ namespace Tensorflow
/// class directly, but instead instantiate one of its subclasses such as
/// `GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
/// </summary>
- public abstract class Optimizer
+ public abstract class Optimizer : Python
{
// Values for gate_gradients.
public static int GATE_NONE = 0;
@@ -87,7 +87,7 @@ namespace Tensorflow
_create_slots(var_list);

var update_ops = new List<Operation>();
- return Python.with<ops.name_scope, Operation>(new ops.name_scope(name, Name), scope =>
+ return with(new ops.name_scope(name, Name), scope =>
{
name = scope;
_prepare();
@@ -98,7 +98,7 @@ namespace Tensorflow
continue;

var scope_name = var.op.name;
- Python.with<ops.name_scope>(new ops.name_scope("update_" + scope_name), scope2 =>
+ with(new ops.name_scope("update_" + scope_name), scope2 =>
{
update_ops.Add(processor.update_op(this, grad));
});


+2 -2 src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs

@@ -5,7 +5,7 @@ using System.Text;

namespace Tensorflow
{
- public class BaseSaverBuilder
+ public class BaseSaverBuilder : Python
{
protected SaverDef.Types.CheckpointFormatVersion _write_version;

@@ -79,7 +79,7 @@ namespace Tensorflow
Tensor save_tensor = null;
Operation restore_op = null;

- return Python.with<ops.name_scope, SaverDef>(new ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
+ return with(new ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
{
name = scope;



+1 -1 src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs

@@ -17,7 +17,7 @@ namespace Tensorflow
private static Tensor op_helper<T>(string default_name, RefVariable x, T y)
{
var tensor1 = x.value();
- return with<ops.name_scope, Tensor>(new ops.name_scope(null, default_name, new { tensor1, y }), scope => {
+ return with(new ops.name_scope(null, default_name, new { tensor1, y }), scope => {
var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y");
return gen_math_ops.add(tensor1, tensor2, scope);
});


+2 -2 src/TensorFlowNET.Core/Variables/RefVariable.cs

@@ -118,7 +118,7 @@ namespace Tensorflow

ops.init_scope();
var values = init_from_fn ? new object[0] : new object[] { initial_value };
- with<ops.name_scope>(new ops.name_scope(name, "Variable", values), scope =>
+ with(new ops.name_scope(name, "Variable", values), scope =>
{
name = scope;
if (init_from_fn)
@@ -132,7 +132,7 @@ namespace Tensorflow
List = new AttrValue.Types.ListValue()
};
attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}"));
- with<ops.name_scope>(new ops.name_scope("Initializer"), scope2 =>
+ with(new ops.name_scope("Initializer"), scope2 =>
{
_initial_value = (initial_value as Func<Tensor>)();
_initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype);


+2 -2 src/TensorFlowNET.Core/Variables/VariableScope.cs

@@ -7,7 +7,7 @@ namespace Tensorflow
/// <summary>
/// Variable scope object to carry defaults to provide to `get_variable`
/// </summary>
- public class VariableScope
+ public class VariableScope : Python
{
public bool use_resource { get; set; }
private _ReuseMode _reuse;
@@ -38,7 +38,7 @@ namespace Tensorflow
VariableAggregation aggregation= VariableAggregation.NONE)
{
string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name;
- return Python.with<ops.name_scope, RefVariable>(new ops.name_scope(null), scope =>
+ return with(new ops.name_scope(null), scope =>
{
if (dtype == TF_DataType.DtInvalid)
dtype = _dtype;


+2 -2 src/TensorFlowNET.Core/ops.py.cs

@@ -12,7 +12,7 @@ using System.ComponentModel;

namespace Tensorflow
{
- public partial class ops
+ public partial class ops : Python
{
public static void add_to_collection<T>(string name, T value)
{
@@ -216,7 +216,7 @@ namespace Tensorflow
// inner_device_stack = default_graph._device_function_stack
// var outer_context = default_graph.as_default;

- Python.with(ops.control_dependencies(null), delegate
+ with(ops.control_dependencies(null), delegate
{
var outer_graph = get_default_graph();
// outer_device_stack = None


+3 -3 test/TensorFlowNET.Examples/ImageRecognition.cs

@@ -39,7 +39,7 @@ namespace TensorFlowNET.Examples

var idx = 0;
float propability = 0;
- with<Session>(tf.Session(graph), sess =>
+ with(tf.Session(graph), sess =>
{
var results = sess.run(output_operation.outputs[0], new FeedItem(input_operation.outputs[0], tensor));
var probabilities = results.Data<float>();
@@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples
int input_mean = 117,
int input_std = 1)
{
- return with<Graph, NDArray>(tf.Graph().as_default(), graph =>
+ return with(tf.Graph().as_default(), graph =>
{
var file_reader = tf.read_file(file_name, "file_reader");
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
@@ -74,7 +74,7 @@ namespace TensorFlowNET.Examples
var sub = tf.subtract(bilinear, new float[] { input_mean });
var normalized = tf.divide(sub, new float[] { input_std });

- return with<Session, NDArray>(tf.Session(graph), sess => sess.run(normalized));
+ return with(tf.Session(graph), sess => sess.run(normalized));
});
}



+3 -3 test/TensorFlowNET.Examples/InceptionArchGoogLeNet.cs

@@ -46,7 +46,7 @@ namespace TensorFlowNET.Examples
var input_operation = graph.get_operation_by_name(input_name);
var output_operation = graph.get_operation_by_name(output_name);

- var results = with<Session, NDArray>(tf.Session(graph),
+ var results = with(tf.Session(graph),
sess => sess.run(output_operation.outputs[0],
new FeedItem(input_operation.outputs[0], nd)));

@@ -68,7 +68,7 @@ namespace TensorFlowNET.Examples
int input_mean = 0,
int input_std = 255)
{
- return with<Graph, NDArray>(tf.Graph().as_default(), graph =>
+ return with(tf.Graph().as_default(), graph =>
{
var file_reader = tf.read_file(file_name, "file_reader");
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
@@ -79,7 +79,7 @@ namespace TensorFlowNET.Examples
var sub = tf.subtract(bilinear, new float[] { input_mean });
var normalized = tf.divide(sub, new float[] { input_std });

- return with<Session, NDArray>(tf.Session(graph), sess => sess.run(normalized));
+ return with(tf.Session(graph), sess => sess.run(normalized));
});
}



+1 -1 test/TensorFlowNET.Examples/LinearRegression.cs

@@ -53,7 +53,7 @@ namespace TensorFlowNET.Examples
var init = tf.global_variables_initializer();

// Start training
- with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
// Run the initializer
sess.run(init);


+1 -1 test/TensorFlowNET.Examples/MetaGraph.cs

@@ -16,7 +16,7 @@ namespace TensorFlowNET.Examples

private void ImportMetaGraph(string dir)
{
- with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var new_saver = tf.train.import_meta_graph(dir + "my-model-10000.meta");
new_saver.restore(sess, dir + "my-model-10000");


+1 -1 test/TensorFlowNET.UnitTest/CApiTest.cs

@@ -6,7 +6,7 @@ using Tensorflow;

namespace TensorFlowNET.UnitTest
{
- public class CApiTest
+ public class CApiTest : Python
{
protected TF_Code TF_OK = TF_Code.TF_OK;
protected TF_DataType TF_FLOAT = TF_DataType.TF_FLOAT;


+5 -5 test/TensorFlowNET.UnitTest/ConstantTest.cs

@@ -10,7 +10,7 @@ using Tensorflow;
namespace TensorFlowNET.UnitTest
{
[TestClass]
- public class ConstantTest
+ public class ConstantTest : Python
{
Status status = new Status();

@@ -27,7 +27,7 @@ namespace TensorFlowNET.UnitTest
{
string str = "Hello, TensorFlow.NET!";
var tensor = tf.constant(str);
- Python.with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var result = sess.run(tensor);
Assert.IsTrue(result.Data<string>()[0] == str);
@@ -39,7 +39,7 @@ namespace TensorFlowNET.UnitTest
{
// small size
var tensor = tf.zeros(new Shape(3, 2), TF_DataType.TF_INT32, "small");
- Python.with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var result = sess.run(tensor);

@@ -50,7 +50,7 @@ namespace TensorFlowNET.UnitTest

// big size
tensor = tf.zeros(new Shape(200, 100), TF_DataType.TF_INT32, "big");
- Python.with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var result = sess.run(tensor);

@@ -74,7 +74,7 @@ namespace TensorFlowNET.UnitTest
});

var tensor = tf.constant(nd);
- Python.with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var result = sess.run(tensor);
var data = result.Data<int>();


+2 -2 test/TensorFlowNET.UnitTest/NameScopeTest.cs

@@ -15,7 +15,7 @@ namespace TensorFlowNET.UnitTest
[TestMethod]
public void NestedNameScope()
{
- with<ops.name_scope>(new ops.name_scope("scope1"), scope1 =>
+ with(new ops.name_scope("scope1"), scope1 =>
{
name = scope1;
Assert.AreEqual("scope1", g._name_stack);
@@ -24,7 +24,7 @@ namespace TensorFlowNET.UnitTest
var const1 = tf.constant(1.0);
Assert.AreEqual("scope1/Const:0", const1.name);

- with<ops.name_scope>(new ops.name_scope("scope2"), scope2 =>
+ with(new ops.name_scope("scope2"), scope2 =>
{
name = scope2;
Assert.AreEqual("scope1/scope2", g._name_stack);


+2 -2 test/TensorFlowNET.UnitTest/PlaceholderTest.cs

@@ -7,7 +7,7 @@ using Tensorflow;
namespace TensorFlowNET.UnitTest
{
[TestClass]
- public class PlaceholderTest
+ public class PlaceholderTest : Python
{
[TestMethod]
public void placeholder()
@@ -15,7 +15,7 @@ namespace TensorFlowNET.UnitTest
var x = tf.placeholder(tf.int32);
var y = x * 3;

- Python.with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var result = sess.run(y,
new FeedItem(x, 2));


+1 -1 test/TensorFlowNET.UnitTest/SessionTest.cs

@@ -82,7 +82,7 @@ namespace TensorFlowNET.UnitTest
var a = constant_op.constant(np.array(3.0).reshape(1, 1));
var b = constant_op.constant(np.array(2.0).reshape(1, 1));
var c = math_ops.matmul(a, b, name: "matmul");
- Python.with(tf.Session(), delegate
+ with(tf.Session(), delegate
{
var result = c.eval();
Assert.AreEqual(6, result.Data<double>()[0]);


+3 -3 test/TensorFlowNET.UnitTest/TrainSaverTest.cs

@@ -19,7 +19,7 @@ namespace TensorFlowNET.UnitTest

public void ImportGraph()
{
- with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
var new_saver = tf.train.import_meta_graph("C:/tmp/my-model.meta");
});
@@ -44,7 +44,7 @@ namespace TensorFlowNET.UnitTest

public void ImportSavedModel()
{
- with<Session>(Session.LoadFromSavedModel("mobilenet"), sess =>
+ with(Session.LoadFromSavedModel("mobilenet"), sess =>
{
});
@@ -65,7 +65,7 @@ namespace TensorFlowNET.UnitTest
// Add ops to save and restore all the variables.
var saver = tf.train.Saver();

- with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
sess.run(init_op);



+10 -3 test/TensorFlowNET.UnitTest/VariableTest.cs

@@ -32,9 +32,10 @@ namespace TensorFlowNET.UnitTest

/// <summary>
/// https://www.tensorflow.org/api_docs/python/tf/variable_scope
+ /// how to create a new variable
/// </summary>
[TestMethod]
- public void VarCreation1()
+ public void VarCreation()
{
with(tf.variable_scope("foo"), delegate
{
@@ -46,6 +47,12 @@ namespace TensorFlowNET.UnitTest
});
}

+ [TestMethod]
+ public void ReenterVariableScope()
+ {
+
+ }

[TestMethod]
public void ScalarVar()
{
@@ -65,7 +72,7 @@ namespace TensorFlowNET.UnitTest
[TestMethod]
public void Assign1()
{
- with<Graph>(tf.Graph().as_default(), graph =>
+ with(tf.Graph().as_default(), graph =>
{
var variable = tf.Variable(31, name: "tree");
var init = tf.global_variables_initializer();
@@ -91,7 +98,7 @@ namespace TensorFlowNET.UnitTest
// Add an op to initialize the variables.
var init_op = tf.global_variables_initializer();

- with<Session>(tf.Session(), sess =>
+ with(tf.Session(), sess =>
{
sess.run(init_op);
// Do some work with the model.

