diff --git a/src/TensorFlowNET.Core/Python.cs b/src/TensorFlowNET.Core/Python.cs
index ad934eb7..d216dae3 100644
--- a/src/TensorFlowNET.Core/Python.cs
+++ b/src/TensorFlowNET.Core/Python.cs
@@ -131,44 +131,6 @@ namespace Tensorflow
}
}
- /// <summary>
- /// Untyped implementation of zip for arbitrary data
- ///
- /// Converts an list of lists or arrays [[1,2,3], [4,5,6], [7,8,9]] into a list of arrays
- /// representing tuples of the same index of all source arrays [[1,4,7], [2,5,9], [3,6,9]]
- /// </summary>
- /// <param name="lists">one or multiple sequences to be zipped</param>
- /// <returns></returns>
- public static IEnumerable<object[]> zip(params object[] lists)
- {
- if (lists.Length == 0)
- yield break;
- var first = lists[0];
- if (first == null)
- yield break;
- var arity = (first as IEnumerable).OfType<object>().Count();
- for (int i = 0; i < arity; i++)
- {
- var array= new object[lists.Length];
- for (int j = 0; j < lists.Length; j++)
- array[j] = GetSequenceElementAt(lists[j], i);
- yield return array;
- }
- }
-
- private static object GetSequenceElementAt(object sequence, int i)
- {
- switch (sequence)
- {
- case Array array:
- return array.GetValue(i);
- case IList list:
- return list[i];
- default:
- return (sequence as IEnumerable).OfType<object>().Skip(Math.Max(0, i)).FirstOrDefault();
- }
- }
-
public static IEnumerable<(int, T)> enumerate<T>(IList<T> values)
{
for (int i = 0; i < values.Count; i++)
diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs
index b61cb0e4..17b2f556 100644
--- a/src/TensorFlowNET.Core/Util/nest.py.cs
+++ b/src/TensorFlowNET.Core/Util/nest.py.cs
@@ -23,8 +23,44 @@ namespace Tensorflow.Util
public static class nest
{
- public static IEnumerable<object[]> zip(params object[] structures)
- => Python.zip(structures);
+
+ /// <summary>
+ /// Untyped implementation of zip for arbitrary data
+ ///
+ /// Converts a list of lists or arrays [[1,2,3], [4,5,6], [7,8,9]] into a list of arrays
+ /// representing tuples of the same index of all source arrays [[1,4,7], [2,5,8], [3,6,9]]
+ /// </summary>
+ /// <param name="lists">one or multiple sequences to be zipped</param>
+ /// <returns></returns>
+ public static IEnumerable<object[]> zip_many(params IEnumerable<object>[] lists)
+ {
+ if (lists.Length == 0)
+ yield break;
+ var first = lists[0];
+ if (first == null)
+ yield break;
+ var arity = first.Count();
+ for (int i = 0; i < arity; i++)
+ {
+ var array = new object[lists.Length];
+ for (int j = 0; j < lists.Length; j++)
+ array[j] = GetSequenceElementAt(lists[j], i);
+ yield return array;
+ }
+ }
+
+ private static object GetSequenceElementAt(object sequence, int i)
+ {
+ switch (sequence)
+ {
+ case Array array:
+ return array.GetValue(i);
+ case IList list:
+ return list[i];
+ default:
+ return _yield_value(sequence).Skip(Math.Max(0, i)).FirstOrDefault();
+ }
+ }
public static IEnumerable<(T1, T2)> zip<T1, T2>(IEnumerable<T1> e1, IEnumerable<T2> e2)
=> Python.zip(e1, e2);
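A minimal usage sketch of the new `zip_many` (illustrative values, not part of the patch):

```csharp
// zip_many groups same-index elements of each input sequence into object[] rows.
var rows = nest.zip_many(
    new object[] { 1, 2, 3 },
    new object[] { 4, 5, 6 },
    new object[] { 7, 8, 9 });
// rows enumerates { 1, 4, 7 }, { 2, 5, 8 }, { 3, 6, 9 }
```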
@@ -40,9 +76,9 @@ namespace Tensorflow.Util
/// <summary>
/// Returns a sorted list of the dict keys, with error if keys not sortable.
/// </summary>
- private static IEnumerable<string> _sorted(IDictionary dict_)
+ private static IEnumerable<object> _sorted(IDictionary dict_)
{
- return dict_.Keys.OfType<string>().OrderBy(x => x);
+ return dict_.Keys.OfType<object>().OrderBy(x => x);
}
@@ -86,7 +122,7 @@ namespace Tensorflow.Util
{
case Hashtable hash:
var result = new Hashtable();
- foreach ((object key, object value) in zip(_sorted(hash).OfType<object>(), args))
+ foreach ((object key, object value) in zip(_sorted(hash), args))
result[key] = value;
return result;
}
@@ -370,13 +406,13 @@ namespace Tensorflow.Util
/// `flat_sequence` converted to have the same recursive structure as
/// `structure`.
///
- public static object pack_sequence_as(object structure, IEnumerable flat_sequence)
+ public static object pack_sequence_as(object structure, IEnumerable<object> flat_sequence)
{
List<object> flat = null;
if (flat_sequence is List<object>)
flat = flat_sequence as List<object>;
else
- flat=new List<object>(flat_sequence.OfType<object>());
+ flat = new List<object>(flat_sequence);
if (flat_sequence==null)
throw new ArgumentException("flat_sequence must not be null");
// if not is_sequence(flat_sequence):
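A minimal sketch of what `pack_sequence_as` does (illustrative values, not part of the patch): it rebuilds the nesting of `structure` from a flat sequence of replacement values.

```csharp
// Rebuild the shape of `structure` using values taken from `flat` in order.
var structure = new object[] { new object[] { 1, 2 }, 3 };
var flat = new object[] { "a", "b", "c" };
var packed = nest.pack_sequence_as(structure, flat);
// packed mirrors structure's nesting: { { "a", "b" }, "c" }
```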
@@ -403,7 +439,7 @@ namespace Tensorflow.Util
var flat_structure = flatten(structure);
if (len(flat_structure) != len(flat))
{
- throw new ValueError("Could not pack sequence. Structure had %d elements, but " +
+ throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " +
$"flat_sequence had {len(flat_structure)} elements. flat_sequence had: {len(flat)}");
}
return _sequence_like(structure, packed);
@@ -413,7 +449,7 @@ namespace Tensorflow.Util
var flat_structure = flatten(structure);
if (len(flat_structure) != len(flat))
{
- throw new ValueError("Could not pack sequence. Structure had %d elements, but " +
+ throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " +
$"flat_sequence had {len(flat_structure)} elements. flat_sequence had: {len(flat)}");
}
return _sequence_like(structure, packed);
@@ -427,10 +463,8 @@ namespace Tensorflow.Util
/// `structure[i]`. All structures in `structure` must have the same arity,
/// and the return value will contain the results in the same structure.
/// </summary>
- /// <typeparam name="T">the type of the elements of the output structure (object if diverse)</typeparam>
/// <param name="func">A callable that accepts as many arguments as there are structures.</param>
- /// <param name="structure">scalar, or tuple or list of constructed scalars and/or other
- /// tuples/lists, or scalars. Note: numpy arrays are considered as scalars.</param>
+ /// <param name="structure">one or many IEnumerable of object</param>
/// If set to
/// `True` (default) the types of iterables within the structures have to be
/// same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
@@ -444,23 +478,22 @@ namespace Tensorflow.Util
/// `check_types` is `False` the sequence types of the first structure will be
/// used.
///
- public static IEnumerable<T> map_structure<T>(Func<object[], T> func, object structure, params object[] more_structures)
+ public static IEnumerable<object> map_structure(Func<object[], object> func, params IEnumerable<object>[] structure)
{
// TODO: check structure and types
// for other in structure[1:]:
// assert_same_structure(structure[0], other, check_types=check_types)
- if (more_structures.Length==0)
+ if (structure.Length==1)
{
// we don't need to zip if we have only one structure
- return map_structure(a => func(new object[]{a}), structure);
+ return map_structure(a => func(new object[]{a}), structure[0]);
}
- var flat_structures = new List<object>() { flatten(structure) };
- flat_structures.AddRange(more_structures.Select(flatten));
- var entries = zip(flat_structures);
+ var flat_structures = structure.Select(flatten).ToArray(); // ToArray is important here: zip_many expects an array of sequences
+ var entries = zip_many(flat_structures);
var mapped_flat_structure = entries.Select(func);
- return (pack_sequence_as(structure, mapped_flat_structure) as IEnumerable).OfType<T>();
+ return _yield_value(pack_sequence_as(structure[0], mapped_flat_structure)).ToList();
}
///
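A minimal sketch of the reworked multi-structure `map_structure` (illustrative values, not part of the patch); it mirrors the NestTest change further down.

```csharp
// Element-wise sum over two structures with the same nesting.
var s1 = new object[] { 1, new object[] { 2, 3 } };
var s2 = new object[] { 10, new object[] { 20, 30 } };
var summed = nest.map_structure(x => (int)x[0] + (int)x[1], s1, s2);
// summed keeps s1's nesting: { 11, { 22, 33 } }
```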
@@ -469,7 +502,7 @@ namespace Tensorflow.Util
///
///
///
- public static IEnumerable<T> map_structure<T>(Func<object, T> func, object structure)
+ public static IEnumerable<object> map_structure(Func<object, object> func, IEnumerable<object> structure)
{
// TODO: check structure and types
// for other in structure[1:]:
@@ -478,7 +511,7 @@ namespace Tensorflow.Util
var flat_structure = flatten(structure);
var mapped_flat_structure = flat_structure.Select(func).ToList();
- return (pack_sequence_as(structure, mapped_flat_structure) as IEnumerable).OfType<T>();
+ return _yield_value(pack_sequence_as(structure, mapped_flat_structure)).ToList();
}
//def map_structure_with_paths(func, *structure, **kwargs):
diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs
index 890caa7a..5d9bb374 100644
--- a/test/TensorFlowNET.UnitTest/PythonTest.cs
+++ b/test/TensorFlowNET.UnitTest/PythonTest.cs
@@ -18,7 +18,8 @@ namespace TensorFlowNET.UnitTest
{
#region python compatibility layer
protected PythonTest self { get => this; }
- protected object None {
+ protected object None
+ {
get { return null; }
}
#endregion
@@ -43,7 +44,7 @@ namespace TensorFlowNET.UnitTest
assertItemsEqual((g[i] as NDArray).Array, (e[i] as NDArray).Array);
else if (e[i] is ICollection && g[i] is ICollection)
assertEqual(g[i], e[i]);
- else
+ else
Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}");
}
}
@@ -102,28 +103,171 @@ namespace TensorFlowNET.UnitTest
{
if (tensors == null)
return null;
- //return nest.map_structure(self._eval_tensor, tensors);
+ return nest.map_structure(self._eval_tensor, tensors);
- return null;
}
- //def evaluate(self, tensors) :
- // """Evaluates tensors and returns numpy values.
-
- // Args:
- // tensors: A Tensor or a nested list/tuple of Tensors.
-
- // Returns:
- // tensors numpy values.
- // """
- // if context.executing_eagerly():
- // return self._eval_helper(tensors)
- // else:
- // sess = ops.get_default_session()
- // if sess is None:
- // with self.test_session() as sess:
- // return sess.run(tensors)
- // else:
- // return sess.run(tensors)
+ protected object _eval_tensor(object tensor)
+ {
+ if (tensor == None)
+ return None;
+ //else if (callable(tensor))
+ // return self._eval_helper(tensor())
+ else
+ {
+ try
+ {
+ //TODO:
+ // if sparse_tensor.is_sparse(tensor):
+ // return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values,
+ // tensor.dense_shape)
+ //return (tensor as Tensor).numpy();
+ }
+ catch (Exception)
+ {
+ throw new ValueError("Unsupported type: " + tensor.GetType());
+ }
+ return null;
+ }
+ }
+
+ /// <summary>
+ /// Evaluates tensors and returns numpy values.
+ /// </summary>
+ /// <param name="tensors">A Tensor or a nested list/tuple of Tensors.</param>
+ /// <returns>tensors numpy values.</returns>
+ public object evaluate(params Tensor[] tensors)
+ {
+ // if context.executing_eagerly():
+ // return self._eval_helper(tensors)
+ // else:
+ {
+ var sess = ops.get_default_session();
+ if (sess == None)
+ with(self.session(), s => sess = s);
+ return sess.run(tensors);
+ }
+ }
+
+ //Returns a TensorFlow Session for use in executing tests.
+ public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false)
+ {
+ //Note that this will set this session and the graph as global defaults.
+
+ //Use the `use_gpu` and `force_gpu` options to control where ops are run. If
+ //`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
+ //`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
+ //possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
+ //the CPU.
+
+ //Example:
+ //```python
+ //class MyOperatorTest(test_util.TensorFlowTestCase):
+ // def testMyOperator(self):
+ // with self.session(use_gpu= True):
+ // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
+ // result = MyOperator(valid_input).eval()
+ // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
+ // invalid_input = [-1.0, 2.0, 7.0]
+ // with self.assertRaisesOpError("negative input not supported"):
+ // MyOperator(invalid_input).eval()
+ //```
+
+ //Args:
+ // graph: Optional graph to use during the returned session.
+ // config: An optional config_pb2.ConfigProto to use to configure the
+ // session.
+ // use_gpu: If True, attempt to run as many ops as possible on GPU.
+ // force_gpu: If True, pin all ops to `/device:GPU:0`.
+
+ //Yields:
+ // A Session object that should be used as a context manager to surround
+ // the graph building and execution code in a test case.
+
+ Session s = null;
+ //if (context.executing_eagerly())
+ // yield None
+ //else
+ {
+ with(self._create_session(graph, config, force_gpu), sess =>
+ {
+ with(self._constrain_devices_and_set_default(sess, use_gpu, force_gpu), (x) =>
+ {
+ s = sess;
+ });
+ });
+ }
+ return s;
+ }
+
+ private IPython _constrain_devices_and_set_default(Session sess, bool useGpu, bool forceGpu)
+ {
+ //def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
+ //"""Set the session and its graph to global default and constrain devices."""
+ //if context.executing_eagerly():
+ // yield None
+ //else:
+ // with sess.graph.as_default(), sess.as_default():
+ // if force_gpu:
+ // # Use the name of an actual device if one is detected, or
+ // # '/device:GPU:0' otherwise
+ // gpu_name = gpu_device_name()
+ // if not gpu_name:
+ // gpu_name = "/device:GPU:0"
+ // with sess.graph.device(gpu_name):
+ // yield sess
+ // elif use_gpu:
+ // yield sess
+ // else:
+ // with sess.graph.device("/device:CPU:0"):
+ // yield sess
+ return sess;
+ }
+
+ // See session() for details.
+ private Session _create_session(Graph graph, object cfg, bool forceGpu)
+ {
+ var prepare_config = new Func<object, object>((config) =>
+ {
+ // """Returns a config for sessions.
+ // Args:
+ // config: An optional config_pb2.ConfigProto to use to configure the
+ // session.
+ // Returns:
+ // A config_pb2.ConfigProto object.
+
+ //TODO: config
+
+ // # use_gpu=False. Currently many tests rely on the fact that any device
+ // # will be used even when a specific device is supposed to be used.
+ // allow_soft_placement = not force_gpu
+ // if config is None:
+ // config = config_pb2.ConfigProto()
+ // config.allow_soft_placement = allow_soft_placement
+ // config.gpu_options.per_process_gpu_memory_fraction = 0.3
+ // elif not allow_soft_placement and config.allow_soft_placement:
+ // config_copy = config_pb2.ConfigProto()
+ // config_copy.CopyFrom(config)
+ // config = config_copy
+ // config.allow_soft_placement = False
+ // # Don't perform optimizations for tests so we don't inadvertently run
+ // # gpu ops on cpu
+ // config.graph_options.optimizer_options.opt_level = -1
+ // # Disable Grappler constant folding since some tests & benchmarks
+ // # use constant input and become meaningless after constant folding.
+ // # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
+ // # GRAPPLER TEAM.
+ // config.graph_options.rewrite_options.constant_folding = (
+ // rewriter_config_pb2.RewriterConfig.OFF)
+ // config.graph_options.rewrite_options.pin_to_host_optimization = (
+ // rewriter_config_pb2.RewriterConfig.OFF)
+ return config;
+ });
+ //TODO: use this instead of normal session
+ //return new ErrorLoggingSession(graph = graph, config = prepare_config(config))
+ return new Session(graph: graph);//, config = prepare_config(config))
+ }
+
#endregion
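A minimal sketch of how the new `evaluate()` helper is meant to be used from a test (hypothetical test name; only `tf.constant`, `tf.multiply` and `assertEquals` from this patch are assumed):

```csharp
[TestMethod]
public void testEvaluateMultiply()
{
    // evaluate() runs the tensor in the default session created via session().
    var product = tf.multiply(tf.constant(3), tf.constant(4));
    self.assertEquals(self.evaluate(product), 12);
}
```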
diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
index b6afc4a2..85908baf 100644
--- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
+++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
@@ -10,14 +10,14 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
public class CondTestCases : PythonTest
{
- [Ignore("Todo")]
[TestMethod]
public void testCondTrue()
{
- //var x = constant_op.constant(2);
- //var y = constant_op.constant(5);
- // var z = control_flow_ops.cond(math_ops.less(x,y), ()=> math_ops.multiply(x, 17), ()=> math_ops.add(y, 23))
- //self.assertEquals(self.evaluate(z), 34);
+ var x = tf.constant(2);
+ var y = tf.constant(5);
+ var z = control_flow_ops.cond(tf.less(x, y), () => tf.multiply(x, tf.constant(17)),
+ () => tf.add(y, tf.constant(23)));
+ self.assertEquals(self.evaluate(z), 34);
}
[Ignore("Todo")]
diff --git a/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs b/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
index 91db7bdc..eefae8a4 100644
--- a/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
+++ b/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
@@ -387,11 +387,10 @@ namespace TensorFlowNET.UnitTest.nest_test
// nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual( nest.flatten(structure1_plus1), new object[] { 2, 3, 4, 5, 6, 7 });
self.assertAllEqual(nest.flatten(structure1_strings), new object[] { "1", "2", "3", "4", "5", "6" });
- // structure1_plus_structure2 = nest.map_structure(
- // lambda x, y: x + y, structure1, structure2)
- // self.assertEqual(
- // (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
- // structure1_plus_structure2)
+ var structure1_plus_structure2 = nest.map_structure(x => (int)(x[0]) + (int)(x[1]), structure1, structure2);
+ self.assertEqual(
+ new object[] { new object[] { new object[] { 1 + 7, 2 + 8}, 3 + 9}, 4 + 10, new object[] { 5 + 11, 6 + 12}},
+ structure1_plus_structure2);
// self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))