| @@ -1,7 +1,5 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Diagnostics; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| @@ -1,7 +1,5 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using NumSharp; | |||
| using System; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| @@ -59,7 +57,7 @@ namespace Tensorflow | |||
| public Action<int> Gradient | |||
| => (iterate) => | |||
| { | |||
| for(int i = 0; i< iterate; i++) | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var w = tf.constant(3112.0f); | |||
| using var tape = tf.GradientTape(); | |||
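| // A minimal sketch of how this tape is typically used next (assuming the tape | |||
| // exposes watch() and gradient(target, source), as in other samples in this repo): | |||
| //   tape.watch(w); | |||
| //   var y = w * w; | |||
| //   var grad = tape.gradient(y, w);   // d(w*w)/dw evaluates to 2 * w | |||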
| @@ -1,6 +1,4 @@ | |||
| using Tensorflow.Keras.Engine; | |||
| namespace Tensorflow.Keras.ArgsDefinition | |||
| { | |||
| public class NodeArgs | |||
| { | |||
| @@ -1,5 +1,4 @@ | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Keras.Engine; | |||
| namespace Tensorflow.Keras.ArgsDefinition | |||
| { | |||
| @@ -1,8 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Keras.Engine | |||
| { | |||
| public interface IModel | |||
| { | |||
| @@ -1,6 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using System.Collections.Generic; | |||
| namespace Tensorflow.Keras.Engine | |||
| { | |||
| @@ -1,6 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Keras.Engine; | |||
| namespace Tensorflow.Keras | |||
| @@ -57,8 +57,8 @@ namespace Tensorflow | |||
| this._loc = array_ops.identity(loc, name); | |||
| this._scale = array_ops.identity(scale, name); | |||
| base._dtype = this._scale.dtype; | |||
| // base._reparameterization_type = new ReparameterizationType("FULLY_REPARAMETERIZED"); | |||
| base._validate_args = validate_args; | |||
| base._allow_nan_stats = allow_nan_stats; | |||
| base._parameters = parameters; | |||
| base._graph_parents = new List<Tensor>(new Tensor[] { this._loc, this._scale }); | |||
| @@ -13,9 +13,9 @@ | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using static Tensorflow.Binding; | |||
| using Tensorflow.Keras.Engine; | |||
| using System; | |||
| using Tensorflow.Keras.Engine; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -242,9 +242,9 @@ namespace Tensorflow | |||
| image = ops.convert_to_tensor(image, name: "image"); | |||
| image = _AssertAtLeast3DImage(image); | |||
| // can't get k to convert to tensor without throwing error about it being an int--- | |||
| // might rework later. for now, k2 == k as Tensor | |||
| Tensor k2 = ops.convert_to_tensor(k, dtype: dtypes.int32, name: "k"); | |||
| k2.TensorShape.assert_has_rank(0); | |||
| k2 = gen_ops.mod(k2, tf.constant(4)); | |||
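| // mod(k2, 4) keeps only the effective number of quarter turns, so e.g. k = 5 | |||
| // produces the same rotation as k = 1 (a single 90-degree turn). | |||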
| @@ -465,8 +465,8 @@ or rank = 4. Had rank = {0}", rank)); | |||
| var assert_ops = _CheckAtLeast3DImage(image, require_static: false); | |||
| // batch: [0], height: [1], width: [2], depth: [3] | |||
| int[] bhwd = _ImageDimensions(image, rank: 4); | |||
| var after_padding_width = target_width - offset_width - bhwd[2]; | |||
| @@ -544,8 +544,8 @@ or rank = 4. Had rank = {0}", rank)); | |||
| var assert_ops = _CheckAtLeast3DImage(image, require_static: false); | |||
| // batch: [0], height: [1], width: [2], depth: [3] | |||
| int[] bhwd = _ImageDimensions(image, rank: 4); | |||
| assert_ops[assert_ops.Length] = _assert(check_ops.assert_greater_equal(tf.constant(offset_height), | |||
| tf.constant(0)), typeof(ValueError), | |||
| @@ -976,7 +976,7 @@ new_height, new_width"); | |||
| image = image - image_mean; | |||
| image = tf.div(image, adjusted_stddev, name: scope); // name: scope in python version | |||
| return convert_image_dtype(image, orig_dtype, saturate: true); | |||
| }); | |||
| } | |||
| @@ -1177,9 +1177,9 @@ new_height, new_width"); | |||
| image = ops.convert_to_tensor(image, name: "image"); | |||
| var channels = image.TensorShape.as_list()[image.TensorShape.dims.Length - 1]; | |||
| var orig_dtype = image.dtype; | |||
| // python code checks to ensure jpeg_quality is a tensor; unnecessary here since | |||
| // it is passed as a tensor | |||
| image = gen_ops.encode_jpeg_variable_quality(image, quality: jpeg_quality); | |||
| image = gen_ops.decode_jpeg(image, channels: channels); | |||
| return convert_image_dtype(image, orig_dtype, saturate: true); | |||
| @@ -1587,8 +1587,8 @@ new_height, new_width"); | |||
| { | |||
| if (k > 0) | |||
| { | |||
| // handle flat_imgs | |||
| Tensor[] flat_imgs = new Tensor[] { }; | |||
| foreach ((Tensor x, Tensor t) in imgs.Zip(tails, Tuple.Create)) | |||
| { | |||
| flat_imgs[flat_imgs.Length] = array_ops.reshape(x, array_ops.concat(new Tensor[] { constant_op.constant(-1), t }, 0)); | |||
| @@ -1602,42 +1602,42 @@ new_height, new_width"); | |||
| true_fn: () => padded_func_pass(), | |||
| false_fn: () => flat_imgs); | |||
| // handle downscaled | |||
| Tensor[] downscaled = new Tensor[] { }; | |||
| foreach (Tensor x in padded) | |||
| { | |||
| downscaled[downscaled.Length] = gen_ops.avg_pool(x, ksize: divisor, strides: divisor, padding: "VALID"); | |||
| } | |||
| // handle tails | |||
| tails = new Tensor[] { }; | |||
| foreach (Tensor x in gen_array_ops.shape_n(downscaled)) | |||
| { | |||
| tails[tails.Length] = new Tensor(x.dims.Skip(1).Take(tails.Length - 1).ToArray()); | |||
| } | |||
| imgs = new Tensor[] { }; | |||
| // tuples weren't working; this is hacky, but should work similarly. | |||
| // zip loads the values into a tuple (Tensor, Tensor, Tensor) for each | |||
| // zip entry; this just gets the length of the longest array, and loops | |||
| // that many times, getting values (like zip) and using them similarly. | |||
| for (int x = 0; x < Math.Max(Math.Max(downscaled.Length, heads.Length), tails.Length); x++) | |||
| { | |||
| imgs[imgs.Length] = array_ops.reshape(downscaled[x], array_ops.concat(new Tensor[] { heads[x], tails[x] }, 0)); | |||
| } | |||
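| // Equivalent sketch using LINQ's pairwise Zip to build the triples (an alternative | |||
| // to the index-based loop above); a List<Tensor> also avoids writing past the end of | |||
| // a zero-length array, since arr[arr.Length] = ... throws IndexOutOfRangeException: | |||
| //   var rebuilt = new List<Tensor>(); | |||
| //   foreach (var ((d, h), t) in downscaled.Zip(heads, (d, h) => (d, h)).Zip(tails, (p, t) => (p, t))) | |||
| //       rebuilt.Add(array_ops.reshape(d, array_ops.concat(new Tensor[] { h, t }, 0))); | |||
| //   imgs = rebuilt.ToArray(); | |||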
| } | |||
| } | |||
| // python code uses * to unpack imgs; how to replicate that here? | |||
| // don't think that this is doing the same thing as the python code. | |||
| (ssim_per_channel, cs) = _ssim_per_channel( | |||
| img1: imgs[0], | |||
| img2: imgs[1], | |||
| max_val: max_val, | |||
| filter_size: filter_size, | |||
| filter_sigma: filter_sigma, | |||
| k1: k1, | |||
| k2: k2); | |||
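| // Since _ssim_per_channel takes exactly two images, passing imgs[0] and imgs[1] is | |||
| // equivalent to Python's *imgs unpacking here; a params Tensor[] overload would be | |||
| // the closest C# analogue if the argument count could vary. | |||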
| mcs.append(gen_nn_ops.relu(cs)); | |||
| } | |||
| @@ -136,8 +136,8 @@ namespace Tensorflow | |||
| return tf_with(ops.name_scope(name, "saturate_cast", new[] { value }), name => | |||
| { | |||
| value = ops.convert_to_tensor(value, name: "value"); | |||
| // dtype = dtypes.as_dtype(dtype).as_base_dtype(); | |||
| if (value.dtype.min() < dtype.min()) | |||
| value = gen_math_ops.maximum( | |||
| value, | |||
| ops.convert_to_tensor(dtype.min(), dtype: value.dtype, name: "min")); | |||
| @@ -264,8 +264,8 @@ namespace Tensorflow | |||
| { | |||
| return tf_with(ops.name_scope(name, "Real", new[] { input }), scope => | |||
| { | |||
| // name = scope; | |||
| input = ops.convert_to_tensor(input, name: "input"); | |||
| if (input.dtype.is_complex()) | |||
| { | |||
| var real_dtype = input.dtype.real_dtype(); | |||
| @@ -192,19 +192,19 @@ namespace Tensorflow | |||
| { | |||
| tf_with(ops.control_dependencies(new object[] { _finish(update_ops.ToArray(), "update") }), dep => | |||
| { | |||
| // ops.colocate_with(global_step); | |||
| // TODO: port this if branch once ResourceVariable has been ported! | |||
| //if (global_step is ResourceVariable) | |||
| //{ | |||
| // # TODO(apassos): the implicit read in assign_add is slow; consider | |||
| // # making it less so. | |||
| // apply_updates = resource_variable_ops.assign_add_variable_op( | |||
| // global_step.handle, | |||
| // ops.convert_to_tensor(1, dtype = global_step.dtype), | |||
| // name = name) | |||
| //} | |||
| //else | |||
| { | |||
| apply_updates = state_ops.assign_add(global_step, | |||
| ops.convert_to_tensor(1, dtype: global_step.dtype), | |||
| name: name); | |||
| @@ -2,7 +2,6 @@ | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using Tensorflow.Keras.Metrics; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasExt; | |||
| namespace Tensorflow.Keras.Engine | |||
| @@ -1,6 +1,5 @@ | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Keras.Metrics; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Keras.Engine | |||
| { | |||
| @@ -19,7 +18,7 @@ namespace Tensorflow.Keras.Engine | |||
| _metrics.add(compiled_metrics.metrics); | |||
| } | |||
| foreach(var layer in _flatten_layers()) | |||
| foreach (var layer in _flatten_layers()) | |||
| { | |||
| // _metrics.extend(layer.metrics); | |||
| } | |||
| @@ -36,7 +36,7 @@ namespace Tensorflow.Keras.Engine | |||
| // such as loss scaling and gradient clipping. | |||
| _minimize(tape, optimizer, loss, trainable_variables); | |||
| compiled_metrics.update_state(y, y_pred); | |||
| return metrics.Select(x => (x.Name, x.result())).ToList(); | |||
| } | |||
| @@ -17,7 +17,6 @@ | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Keras.ArgsDefinition; | |||
| using Tensorflow.Keras.Layers; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasExt; | |||
| namespace Tensorflow.Keras.Engine | |||
| @@ -1,6 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Keras.ArgsDefinition; | |||
| using Tensorflow.Keras.Datasets; | |||
| using Tensorflow.Keras.Engine; | |||
| @@ -1,7 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow.Keras; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -1,6 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Keras.Metrics | |||
| { | |||
| @@ -8,7 +6,7 @@ namespace Tensorflow.Keras.Metrics | |||
| { | |||
| string name; | |||
| Func<Tensor, Tensor, Tensor> _fn = null; | |||
| public MeanMetricWrapper(Func<Tensor, Tensor, Tensor> fn, string name, TF_DataType dtype = TF_DataType.TF_FLOAT) | |||
| : base(name: name, dtype: dtype) | |||
| { | |||
| @@ -1,8 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Keras.Metrics | |||
| { | |||
| public class MetricsApi | |||
| { | |||
| @@ -17,7 +13,7 @@ namespace Tensorflow.Keras.Metrics | |||
| var y_pred_rank = y_pred.TensorShape.ndim; | |||
| var y_true_rank = y_true.TensorShape.ndim; | |||
| // If the shape of y_true is (num_samples, 1), squeeze to (num_samples,) | |||
| if (y_true_rank != -1 && y_pred_rank != -1 | |||
| && y_true.shape.Length == y_pred.shape.Length) | |||
| y_true = array_ops.squeeze(y_true, axis: new[] { -1 }); | |||
| y_pred = math_ops.argmax(y_pred, -1); | |||
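| // e.g. y_true of shape (num_samples, 1) is squeezed to (num_samples,), and the | |||
| // argmax over the last axis reduces y_pred of shape (num_samples, num_classes) to | |||
| // one predicted class index per sample before the two are compared. | |||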
| @@ -54,7 +54,7 @@ namespace Tensorflow.Keras.Optimizers | |||
| Name = name | |||
| }); | |||
| public SGD SGD(float learning_rate) | |||
| => new SGD(learning_rate); | |||
| } | |||
| } | |||
| @@ -1,5 +1,4 @@ | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasExt; | |||
| namespace Tensorflow.Keras | |||
| { | |||
| @@ -6,6 +6,25 @@ | |||
| <LangVersion>8.0</LangVersion> | |||
| <RootNamespace>Tensorflow.Keras</RootNamespace> | |||
| <Platforms>AnyCPU;x64</Platforms> | |||
| <Version>0.1.0</Version> | |||
| <Authors>Haiping Chen</Authors> | |||
| <Product>Keras for .NET</Product> | |||
| <Copyright>Apache 2.0, Haiping Chen 2020</Copyright> | |||
| <PackageId>TensorFlow.Keras</PackageId> | |||
| <PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
| <PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl> | |||
| <RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
| <PackageReleaseNotes>Keras for .NET is a C# version of Keras migrated from the python version.</PackageReleaseNotes> | |||
| <Description>Keras for .NET developers. | |||
| Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent &amp; simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear &amp; actionable error messages.</Description> | |||
| <Company>SciSharp STACK</Company> | |||
| <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | |||
| <PackageTags>tensorflow, keras, deep learning, machine learning</PackageTags> | |||
| </PropertyGroup> | |||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | |||
| <DefineConstants>DEBUG;TRACE</DefineConstants> | |||
| </PropertyGroup> | |||
| <ItemGroup> | |||
| @@ -64,7 +64,7 @@ namespace Tensorflow.Keras | |||
| Trainable = trainable, | |||
| Name = name | |||
| }); | |||
| return layer.Apply(inputs); | |||
| } | |||
| @@ -14,8 +14,6 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using Tensorflow.Keras.Optimizers; | |||
| namespace Tensorflow.Keras | |||
| { | |||
| public class tensorflow_backup | |||
| @@ -1,7 +1,7 @@ | |||
| using System; | |||
| using System.Reflection; | |||
| using BenchmarkDotNet.Configs; | |||
| using BenchmarkDotNet.Running; | |||
| using System; | |||
| using System.Reflection; | |||
| namespace TensorFlowBenchmark | |||
| { | |||
| @@ -1,9 +1,6 @@ | |||
| using System; | |||
| using BenchmarkDotNet.Attributes; | |||
| using NumSharp; | |||
| using BenchmarkDotNet.Attributes; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowBenchmark | |||
| { | |||
| @@ -1,11 +1,7 @@ | |||
| using System; | |||
| using BenchmarkDotNet.Attributes; | |||
| using System; | |||
| using System.Runtime.CompilerServices; | |||
| using System.Runtime.InteropServices; | |||
| using BenchmarkDotNet.Attributes; | |||
| using Google.Protobuf.WellKnownTypes; | |||
| using NumSharp; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowBenchmark.Unmanaged | |||
| { | |||
| @@ -57,7 +53,7 @@ namespace TensorFlowBenchmark.Unmanaged | |||
| UnmanagedStruct _; | |||
| for (int i = 0; i < 10000; i++) | |||
| { | |||
| _ = *(UnmanagedStruct*) dptr; | |||
| _ = *(UnmanagedStruct*)dptr; | |||
| } | |||
| } | |||
| @@ -1,8 +1,5 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -20,7 +17,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| var enqueue = queue.enqueue(numbers); | |||
| var dequeue_many = queue.dequeue_many(n: 3); | |||
| using(var sess = tf.Session()) | |||
| using (var sess = tf.Session()) | |||
| { | |||
| sess.run(enqueue, (numbers, new[] { 1 })); | |||
| sess.run(enqueue, (numbers, new[] { 2, 3 })); | |||
| @@ -106,7 +103,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| init.run(); | |||
| foreach(var i in range(9)) | |||
| foreach (var i in range(9)) | |||
| results += (int)sess.run(x) + "."; | |||
| // output in random order | |||
| @@ -1,13 +1,9 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Reflection; | |||
| using System.Runtime.CompilerServices; | |||
| using System.Text; | |||
| using FluentAssertions; | |||
| using Google.Protobuf; | |||
| using NumSharp.Backends; | |||
| using Tensorflow; | |||
| using Tensorflow.Util; | |||
| using static Tensorflow.Binding; | |||
| @@ -46,14 +42,14 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| inputs.Add(feed, new Tensor(3)); | |||
| csession.SetInputs(inputs); | |||
| var outputs = new TF_Output[] {new TF_Output(add, 0)}; | |||
| var outputs = new TF_Output[] { new TF_Output(add, 0) }; | |||
| csession.SetOutputs(outputs); | |||
| csession.Run(s); | |||
| Tensor outTensor = csession.output_tensor(0); | |||
| EXPECT_EQ(TF_DataType.TF_INT32, outTensor.dtype); | |||
| EXPECT_EQ(0, outTensor.NDims); | |||
| ASSERT_EQ((ulong) sizeof(uint), outTensor.bytesize); | |||
| ASSERT_EQ((ulong)sizeof(uint), outTensor.bytesize); | |||
| var output_contents = outTensor.ToArray<int>(); | |||
| EXPECT_EQ(3 + 2, output_contents[0]); | |||
| @@ -65,7 +61,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| inputs = new Dictionary<Operation, Tensor>(); | |||
| inputs.Add(feed, new Tensor(7)); | |||
| csession.SetInputs(inputs); | |||
| outputs = new TF_Output[] {new TF_Output(neg, 0)}; | |||
| outputs = new TF_Output[] { new TF_Output(neg, 0) }; | |||
| csession.SetOutputs(outputs); | |||
| csession.Run(s); | |||
| ASSERT_EQ(TF_Code.TF_OK, s.Code); | |||
| @@ -74,7 +70,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| ASSERT_TRUE(outTensor != IntPtr.Zero); | |||
| EXPECT_EQ(TF_DataType.TF_INT32, outTensor.dtype); | |||
| EXPECT_EQ(0, outTensor.NDims); // scalar | |||
| ASSERT_EQ((ulong) sizeof(uint), outTensor.bytesize); | |||
| ASSERT_EQ((ulong)sizeof(uint), outTensor.bytesize); | |||
| output_contents = outTensor.ToArray<int>(); | |||
| EXPECT_EQ(-(7 + 2), output_contents[0]); | |||
| @@ -132,7 +128,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| } | |||
| } | |||
| } | |||
| [TestMethod] | |||
| public void Autocast_Case0() | |||
| { | |||
| @@ -142,14 +138,14 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| var ret = sess.run(operation); | |||
| ret.Should().BeNull(); | |||
| } | |||
| } | |||
| [TestMethod] | |||
| public void Autocast_Case1() | |||
| { | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(tf.float32, shape: new TensorShape(6)); | |||
| var op = tf.reshape(input, new int[] {2, 3}); | |||
| var op = tf.reshape(input, new int[] { 2, 3 }); | |||
| sess.run(tf.global_variables_initializer()); | |||
| var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6))); | |||
| @@ -163,7 +159,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| { | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(tf.float64, shape: new TensorShape(6)); | |||
| var op = tf.reshape(input, new int[] {2, 3}); | |||
| var op = tf.reshape(input, new int[] { 2, 3 }); | |||
| sess.run(tf.global_variables_initializer()); | |||
| var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(NPTypeCode.Single) + 0.1f)); | |||
| @@ -177,7 +173,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| { | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(tf.int64, shape: new TensorShape(6)); | |||
| var op = tf.reshape(input, new int[] {2, 3}); | |||
| var op = tf.reshape(input, new int[] { 2, 3 }); | |||
| sess.run(tf.global_variables_initializer()); | |||
| var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(NPTypeCode.Single) + 0.1f)); | |||
| @@ -191,7 +187,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| { | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(tf.byte8, shape: new TensorShape(6)); | |||
| var op = tf.reshape(input, new int[] {2, 3}); | |||
| var op = tf.reshape(input, new int[] { 2, 3 }); | |||
| sess.run(tf.global_variables_initializer()); | |||
| var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(NPTypeCode.Single) + 0.1f)); | |||
| @@ -1,6 +1,6 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -14,7 +14,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| int a = 2; | |||
| int b = 3; | |||
| var dims = new [] { Unknown, a, b}; | |||
| var dims = new[] { Unknown, a, b }; | |||
| new TensorShape(dims).GetPrivate<Shape>("shape").Should().BeShaped(-1, 2, 3); | |||
| } | |||
| @@ -23,8 +23,8 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| int a = 2; | |||
| int b = 3; | |||
| var dims = new[] { Unknown, a, b}; | |||
| new TensorShape(new [] {dims}).GetPrivate<Shape>("shape").Should().BeShaped(-1, 2, 3); | |||
| var dims = new[] { Unknown, a, b }; | |||
| new TensorShape(new[] { dims }).GetPrivate<Shape>("shape").Should().BeShaped(-1, 2, 3); | |||
| } | |||
| [TestMethod] | |||
| @@ -32,8 +32,8 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| int a = 2; | |||
| int b = Unknown; | |||
| var dims = new [] { Unknown, a, b}; | |||
| new TensorShape(new [] {dims}).GetPrivate<Shape>("shape").Should().BeShaped(-1, 2, -1); | |||
| var dims = new[] { Unknown, a, b }; | |||
| new TensorShape(new[] { dims }).GetPrivate<Shape>("shape").Should().BeShaped(-1, 2, -1); | |||
| } | |||
| [TestMethod] | |||
| @@ -1,13 +1,11 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using System.Threading; | |||
| using FluentAssertions; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| using Tensorflow.Framework; | |||
| namespace TensorFlowNET.UnitTest.NativeAPI | |||
| { | |||
| @@ -21,19 +19,19 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| var span = new Span<float>(array, 100, 500); | |||
| fixed (float* ptr = &MemoryMarshal.GetReference(span)) | |||
| { | |||
| using (var t = new Tensor((IntPtr) ptr, new long[] {span.Length}, tf.float32, 4 * span.Length)) | |||
| using (var t = new Tensor((IntPtr)ptr, new long[] { span.Length }, tf.float32, 4 * span.Length)) | |||
| { | |||
| Assert.IsFalse(t.IsDisposed); | |||
| Assert.AreEqual(2000, (int) t.bytesize); | |||
| Assert.AreEqual(2000, (int)t.bytesize); | |||
| } | |||
| } | |||
| fixed (float* ptr = &array[0]) | |||
| { | |||
| using (var t = new Tensor((IntPtr) ptr, new long[] {array.Length}, tf.float32, 4 * array.Length)) | |||
| using (var t = new Tensor((IntPtr)ptr, new long[] { array.Length }, tf.float32, 4 * array.Length)) | |||
| { | |||
| Assert.IsFalse(t.IsDisposed); | |||
| Assert.AreEqual(4000, (int) t.bytesize); | |||
| Assert.AreEqual(4000, (int)t.bytesize); | |||
| } | |||
| } | |||
| } | |||
| @@ -42,22 +40,22 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| public unsafe void TensorFromArray() | |||
| { | |||
| var array = new float[1000]; | |||
| using (var t = new Tensor(array, new long[] {array.Length}, tf.float32)) | |||
| using (var t = new Tensor(array, new long[] { array.Length }, tf.float32)) | |||
| { | |||
| Assert.IsFalse(t.IsDisposed); | |||
| Assert.AreEqual(1000 * sizeof(float), (int) t.bytesize); | |||
| Assert.AreEqual(1000 * sizeof(float), (int)t.bytesize); | |||
| } | |||
| using (var t = new Tensor(new float[] {1}, new long[] {1}, tf.float32)) | |||
| using (var t = new Tensor(new float[] { 1 }, new long[] { 1 }, tf.float32)) | |||
| { | |||
| Assert.IsFalse(t.IsDisposed); | |||
| Assert.AreEqual(1 * sizeof(float), (int) t.bytesize); | |||
| Assert.AreEqual(1 * sizeof(float), (int)t.bytesize); | |||
| } | |||
| using (var t = new Tensor(new float[] {1}, null, tf.float32)) | |||
| using (var t = new Tensor(new float[] { 1 }, null, tf.float32)) | |||
| { | |||
| Assert.IsFalse(t.IsDisposed); | |||
| Assert.AreEqual(1 * sizeof(float), (int) t.bytesize); | |||
| Assert.AreEqual(1 * sizeof(float), (int)t.bytesize); | |||
| t.shape.Should().BeEmpty(); | |||
| } | |||
| } | |||
| @@ -66,11 +64,11 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| public void AllocateTensor() | |||
| { | |||
| ulong num_bytes = 6 * sizeof(float); | |||
| long[] dims = {2, 3}; | |||
| long[] dims = { 2, 3 }; | |||
| Tensor t = c_api.TF_AllocateTensor(TF_DataType.TF_FLOAT, dims, 2, num_bytes); | |||
| EXPECT_EQ(TF_DataType.TF_FLOAT, t.dtype); | |||
| EXPECT_EQ(2, t.NDims); | |||
| EXPECT_EQ((int) dims[0], t.shape[0]); | |||
| EXPECT_EQ((int)dims[0], t.shape[0]); | |||
| EXPECT_EQ(num_bytes, t.bytesize); | |||
| t.Dispose(); | |||
| } | |||
| @@ -104,10 +102,10 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| EXPECT_EQ(tensor.dtype, TF_DataType.TF_FLOAT); | |||
| EXPECT_EQ(tensor.rank, nd.ndim); | |||
| EXPECT_EQ((int) tensor.shape[0], nd.shape[0]); | |||
| EXPECT_EQ((int) tensor.shape[1], nd.shape[1]); | |||
| EXPECT_EQ(tensor.bytesize, (ulong) nd.size * sizeof(float)); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] {1, 2, 3, 4, 5, 6})); | |||
| EXPECT_EQ((int)tensor.shape[0], nd.shape[0]); | |||
| EXPECT_EQ((int)tensor.shape[1], nd.shape[1]); | |||
| EXPECT_EQ(tensor.bytesize, (ulong)nd.size * sizeof(float)); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] { 1, 2, 3, 4, 5, 6 })); | |||
| } | |||
| /// <summary> | |||
| @@ -136,7 +134,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| EXPECT_EQ(-1, num_dims); | |||
| // Set the shape to be 2 x Unknown | |||
| long[] dims = {2, -1}; | |||
| long[] dims = { 2, -1 }; | |||
| c_api.TF_GraphSetTensorShape(graph, feed_out_0, dims, dims.Length, s.Handle); | |||
| Assert.IsTrue(s.Code == TF_Code.TF_OK); | |||
| num_dims = c_api.TF_GraphGetTensorNumDims(graph, feed_out_0, s.Handle); | |||
| @@ -165,8 +163,8 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s.Handle); | |||
| Assert.IsTrue(s.Code == TF_Code.TF_OK); | |||
| EXPECT_EQ(2, num_dims); | |||
| EXPECT_EQ(2, (int) returned_dims[0]); | |||
| EXPECT_EQ(3, (int) returned_dims[1]); | |||
| EXPECT_EQ(2, (int)returned_dims[0]); | |||
| EXPECT_EQ(3, (int)returned_dims[1]); | |||
| // Try to set 'unknown' with same rank on the shape and see that | |||
| // it doesn't change. | |||
| @@ -177,8 +175,8 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s.Handle); | |||
| Assert.IsTrue(s.Code == TF_Code.TF_OK); | |||
| EXPECT_EQ(2, num_dims); | |||
| EXPECT_EQ(2, (int) returned_dims[0]); | |||
| EXPECT_EQ(3, (int) returned_dims[1]); | |||
| EXPECT_EQ(2, (int)returned_dims[0]); | |||
| EXPECT_EQ(3, (int)returned_dims[1]); | |||
| // Try to fetch a shape with the wrong num_dims | |||
| c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, 5, s.Handle); | |||
| @@ -208,7 +206,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| public void sparse_to_dense() | |||
| { | |||
| var indices = tf.reshape(tf.range(0, 5), new int[] { 5, 1 }); | |||
| var labels = tf.expand_dims(tf.constant(new[] { 0, 1, 2, 3, 4 }),1); | |||
| var labels = tf.expand_dims(tf.constant(new[] { 0, 1, 2, 3, 4 }), 1); | |||
| var st = tf.concat(values: new[] { indices, labels }, axis: 1); | |||
| var onehot = tf.sparse_to_dense(st, (5, 5), 1); | |||
| using (var sess = tf.Session()) | |||
| @@ -44,7 +44,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| tf_with(Session.LoadFromSavedModel("mobilenet"), sess => | |||
| { | |||
| }); | |||
| } | |||
| @@ -1,8 +1,5 @@ | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System.Linq; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.Basics | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.Basics | |||
| @@ -1,11 +1,6 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using Tensorflow.Keras; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.Dataset | |||
| @@ -20,7 +15,7 @@ namespace TensorFlowNET.UnitTest.Dataset | |||
| long value = 0; | |||
| var dataset = tf.data.Dataset.range(3); | |||
| foreach(var (step, item) in enumerate(dataset)) | |||
| foreach (var (step, item) in enumerate(dataset)) | |||
| { | |||
| Assert.AreEqual(iStep, step); | |||
| iStep++; | |||
| @@ -114,7 +109,7 @@ namespace TensorFlowNET.UnitTest.Dataset | |||
| foreach (var item in dataset) | |||
| { | |||
| Assert.AreEqual(value, (long)item.Item1); | |||
| value ++; | |||
| value++; | |||
| } | |||
| } | |||
| @@ -1,18 +1,10 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using Tensorflow.Keras.Layers; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasExt; | |||
| namespace TensorFlowNET.UnitTest | |||
| { | |||
| public class EagerModeTestBase : PythonTest | |||
| { | |||
| protected LayersApi layers = keras.layers; | |||
| [TestInitialize] | |||
| public void TestInit() | |||
| { | |||
| @@ -1,15 +1,9 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Diagnostics; | |||
| using System.IO; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using System.Threading; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using Tensorflow; | |||
| using Tensorflow.Util; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.NativeAPI | |||
| @@ -63,11 +57,13 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| try | |||
| { | |||
| ret = fnc(); | |||
| } catch (Exception ee) | |||
| } | |||
| catch (Exception ee) | |||
| { | |||
| e = ee; | |||
| throw; | |||
| } finally | |||
| } | |||
| finally | |||
| { | |||
| mrh.Set(); | |||
| } | |||
| @@ -90,11 +86,13 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| try | |||
| { | |||
| fnc(); | |||
| } catch (Exception ee) | |||
| } | |||
| catch (Exception ee) | |||
| { | |||
| e = ee; | |||
| throw; | |||
| } finally | |||
| } | |||
| finally | |||
| { | |||
| mrh.Set(); | |||
| } | |||
| @@ -1,8 +1,8 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,7 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using TensorFlowNET.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,12 +1,7 @@ | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.IO; | |||
| using System.Linq; | |||
| using System.Reflection; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -51,11 +46,11 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| image = image[tf.newaxis, tf.ellipsis, tf.newaxis]; | |||
| image = tf.image.resize(image, (3, 5)); | |||
| image = image[0, tf.ellipsis, 0]; | |||
| Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0.6666667f, 0.3333333f, 0, 0, 0 }, | |||
| image[0].ToArray<float>())); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0, 0, 1, 0, 0 }, | |||
| image[1].ToArray<float>())); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0, 0, 0, 0.3333335f, 0.6666665f }, | |||
| image[2].ToArray<float>())); | |||
| tf.compat.v1.disable_eager_execution(); | |||
| } | |||
| @@ -1,7 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using NumSharp; | |||
| using static Tensorflow.KerasExt; | |||
| @@ -23,6 +20,8 @@ namespace TensorFlowNET.UnitTest.Keras | |||
| [TestMethod] | |||
| public void Functional() | |||
| { | |||
| var layers = keras.layers; | |||
| var inputs = keras.Input(shape: 784); | |||
| Assert.AreEqual((None, 784), inputs.TensorShape); | |||
| @@ -1,7 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow.Keras.Engine; | |||
| using static Tensorflow.KerasExt; | |||
| @@ -1,8 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -36,7 +32,7 @@ namespace TensorFlowNET.UnitTest.nn_test | |||
| public void TanH() | |||
| { | |||
| var b = tf.nn.tanh(a, name: "TanH"); | |||
| var expected = new float[] { 0.7615942f, -0.46211717f, 0.9977749f , -0.970452f, 0f, -0.99999547f }; | |||
| var expected = new float[] { 0.7615942f, -0.46211717f, 0.9977749f, -0.970452f, 0f, -0.99999547f }; | |||
| var actual = b.ToArray<float>(); | |||
| Assert.IsTrue(Equal(expected, actual)); | |||
| } | |||
| @@ -2,8 +2,6 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,10 +1,5 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq.Expressions; | |||
| using System.Runtime.CompilerServices; | |||
| using System.Security.Cryptography.X509Certificates; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,10 +1,6 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using Tensorflow.Graphs; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.ManagedAPI | |||
| @@ -1,8 +1,4 @@ | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System.Linq; | |||
| using Tensorflow; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.ManagedAPI | |||
| @@ -1,7 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.ManagedAPI | |||
| @@ -1,8 +1,5 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,7 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,6 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace TensorFlowNET.UnitTest | |||
| { | |||
| @@ -1,8 +1,6 @@ | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System.Linq; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| namespace TensorFlowNET.UnitTest.ManagedAPI | |||
| @@ -26,17 +24,17 @@ namespace TensorFlowNET.UnitTest.ManagedAPI | |||
| [TestMethod] | |||
| public void InitTensorTest() | |||
| { | |||
| var a = tf.constant(np.array(new[, ,] | |||
| { | |||
| { { 1 }, { 2 }, { 3 } }, | |||
| { { 4 }, { 5 }, { 6 } } | |||
| })); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(new[] { 2, 3, 1 }, a.shape)); | |||
| var b = tf.constant(new[, ,] | |||
| { | |||
| { { 1 }, { 2 }, { 3 } }, | |||
| { { 4 }, { 5 }, { 6 } } | |||
| }); | |||
| Assert.IsTrue(Enumerable.SequenceEqual(new[] { 2, 3, 1 }, b.shape)); | |||
| } | |||
| @@ -1,7 +1,7 @@ | |||
| using System; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| @@ -16,7 +16,7 @@ namespace TensorFlowNET.UnitTest.nn_test | |||
| int total_elements = np.prod(x.shape); | |||
| var eps = 1e-8; | |||
| var nonzeros = x.Data<double>().Count(d=>Math.Abs(d)> eps); | |||
| var nonzeros = x.Data<double>().Count(d => Math.Abs(d) > eps); | |||
| return 1.0 - nonzeros / (double)total_elements; | |||
| } | |||
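| // Worked example: for x = [0, 0, 3, 0], nonzeros = 1 and total_elements = 4, | |||
| // so the returned zero fraction is 1.0 - 1.0 / 4.0 = 0.75. | |||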
| @@ -26,7 +26,7 @@ namespace TensorFlowNET.UnitTest.nn_test | |||
| { | |||
| var x_shape = new Shape(5, 17); | |||
| var x_np = np.random.randint(0, 2, x_shape); | |||
| //x_np.astype(np.float32); | |||
| var y_np = this._ZeroFraction(x_np); | |||
| var x_tf = constant_op.constant(x_np); | |||
| @@ -53,7 +53,7 @@ namespace TensorFlowNET.UnitTest.nn_test | |||
| public void testZeroFraction2_27Zeros() | |||
| { | |||
| var sparsity = nn_impl.zero_fraction( | |||
| array_ops.zeros(new Shape((int) Math.Pow(2, 27 * 1.01)), dtypes.int8)); | |||
| array_ops.zeros(new Shape((int)Math.Pow(2, 27 * 1.01)), dtypes.int8)); | |||
| self.assertAllClose(1.0, self.evaluate<NDArray>(sparsity)); | |||
| } | |||
| @@ -1,11 +1,10 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.IO; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using FluentAssertions; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -147,7 +146,7 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| for (int i = 0; i < 100; i++) | |||
| { | |||
| var t = new Tensor(new int[] {1, 2, 3}); | |||
| var t = new Tensor(new int[] { 1, 2, 3 }); | |||
| } | |||
| } | |||
| } | |||
| @@ -171,7 +170,7 @@ namespace TensorFlowNET.UnitTest | |||
| var v = (int*)Marshal.AllocHGlobal(sizeof(int)); | |||
| c_api.DeallocatorArgs _deallocatorArgs = new c_api.DeallocatorArgs(); | |||
| var handle = c_api.TF_NewTensor(typeof(int).as_dtype(), dims: new long[0], num_dims: 0, | |||
| data: (IntPtr) v, len: (UIntPtr) sizeof(int), | |||
| data: (IntPtr)v, len: (UIntPtr)sizeof(int), | |||
| deallocator: (IntPtr data, IntPtr size, ref c_api.DeallocatorArgs args) => Marshal.FreeHGlobal(data), | |||
| ref _deallocatorArgs); | |||
| c_api.TF_DeleteTensor(handle); | |||
| @@ -190,8 +189,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| tf.peak_default_graph().Should().BeNull(); | |||
| //graph is created automatically to perform create these operations | |||
| var a1 = tf.constant(new[] {2f}, shape: new[] {1}); | |||
| var a2 = tf.constant(new[] {3f}, shape: new[] {1}); | |||
| var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||
| var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }); | |||
| var math = a1 + a2; | |||
| for (int i = 0; i < 100; i++) | |||
| { | |||
| @@ -215,8 +214,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| tf.peak_default_graph().Should().NotBeNull(); | |||
| //graph is created automatically to perform create these operations | |||
| var a1 = tf.constant(new[] {2f}, shape: new[] {1}); | |||
| var a2 = tf.constant(new[] {3f}, shape: new[] {1}); | |||
| var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||
| var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }); | |||
| var math = a1 + a2; | |||
| var result = sess.run(math); | |||
| @@ -237,8 +236,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| tf.peak_default_graph().Should().NotBeNull(); | |||
| //graph is created automatically to perform create these operations | |||
| var a1 = tf.constant(new[] {2f}, shape: new[] {1}); | |||
| var a2 = tf.constant(new[] {3f}, shape: new[] {1}); | |||
| var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||
| var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }); | |||
| var math = a1 + a2; | |||
| } | |||
| } | |||
| @@ -254,8 +253,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| tf.peak_default_graph().Should().BeNull(); | |||
| //graph is created automatically to perform create these operations | |||
| var a1 = tf.constant(new[] {2f}, shape: new[] {1}); | |||
| var a2 = tf.constant(new[] {3f}, shape: new[] {1}); | |||
| var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||
| var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }); | |||
| var math = a1 + a2; | |||
| } | |||
| } | |||
| @@ -270,8 +269,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| tf.peak_default_graph().Should().BeNull(); | |||
| //graph is created automatically to perform create these operations | |||
| var a1 = tf.constant(new[] {2f}, shape: new[] {1}); | |||
| var a2 = tf.constant(new[] {3f}, shape: new[] {1}, name: "ConstantK"); | |||
| var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||
| var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }, name: "ConstantK"); | |||
| var math = a1 + a2; | |||
| for (int i = 0; i < 100; i++) | |||
| { | |||
| @@ -295,7 +294,7 @@ namespace TensorFlowNET.UnitTest | |||
| for (int j = 0; j < 100; j++) | |||
| { | |||
| var sess = Session.LoadFromSavedModel(modelPath).as_default(); | |||
| var inputs = new[] {"sp", "fuel"}; | |||
| var inputs = new[] { "sp", "fuel" }; | |||
| var inp = inputs.Select(name => sess.graph.OperationByName(name).output).ToArray(); | |||
| var outp = sess.graph.OperationByName("softmax_tensor").output; | |||
| @@ -1,5 +1,4 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -2,10 +2,7 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Reflection; | |||
| using System.Runtime.InteropServices; | |||
| using Tensorflow; | |||
| using Tensorflow.Functions; | |||
| using static TensorFlowNET.UnitTest.c_test_util; | |||
| namespace TensorFlowNET.UnitTest.NativeAPI | |||
| @@ -61,7 +58,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| var func_feed = Placeholder(host_graph_, s_); | |||
| var func_op = Use(new[] { func_feed }); | |||
| Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) }, func_op, -3); | |||
| VerifyFDef(new string[] { "neg_0" }, | |||
| VerifyFDef(new string[] { "neg_0" }, | |||
| new List<IOSpec> { new IOSpec("feed", DataType.DtInt32) }, | |||
| new List<IOSpec> { new IOSpec("neg", DataType.DtInt32) }, | |||
| new List<EdgeSpec> { new EdgeSpec("feed", "neg_0:0"), new EdgeSpec("neg_0:y:0", "neg") }, | |||
| @@ -211,9 +208,9 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| VerifyFDef(new string[] { "add_0" }, | |||
| new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") }, | |||
| new List<IOSpec> { new IOSpec("add") }, | |||
| new List<EdgeSpec> | |||
| { | |||
| new EdgeSpec("feed1", "add_0:0"), | |||
| new EdgeSpec("feed2", "add_0:1"), | |||
| new EdgeSpec("add_0:sum:0", "add") | |||
| }, | |||
| @@ -397,7 +394,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| outputs.Select(x => new TF_Output(x, 0)).ToArray(), | |||
| output_names, expect_failure); | |||
| void DefineT(int num_opers, Operation[] opers, | |||
| TF_Output[] inputs, TF_Output[] outputs, | |||
| string[] output_names, bool expect_failure = false) | |||
| { | |||
| @@ -405,7 +402,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| num_opers, num_opers == -1 ? null : opers.Select(x => (IntPtr)x).ToArray(), | |||
| inputs.Length, inputs.ToArray(), | |||
| outputs.Length, outputs.ToArray(), | |||
| output_names == null || output_names.Length == 0 ? null : output_names, | |||
| IntPtr.Zero, null, s_.Handle); | |||
| if (expect_failure) | |||
| @@ -480,7 +477,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| void VerifyFDefNodes(FunctionDef fdef, string[] nodes) | |||
| { | |||
| ASSERT_EQ(nodes.Length, fdef.NodeDef.Count); | |||
| foreach(var node in fdef.NodeDef) | |||
| foreach (var node in fdef.NodeDef) | |||
| { | |||
| ASSERT_TRUE(nodes.Contains(node.Name), $"Got unexpected node: {node.Name} in fdef: {fdef}"); | |||
| } | |||
| @@ -519,7 +516,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| // Build a set of edges from fdef | |||
| var a_edges = new List<EdgeSpec>(); // actual edges | |||
| // Get edges from inputs to body nodes and between body nodes | |||
| foreach(var node in fdef.NodeDef) | |||
| foreach (var node in fdef.NodeDef) | |||
| { | |||
| for (int i = 0; i < node.Input.Count; ++i) | |||
| { | |||
| @@ -528,10 +525,10 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| } | |||
| } | |||
| // Get edges from body nodes to outputs and from inputs to outputs | |||
| foreach(var arg in fdef.Signature.OutputArg) | |||
| foreach (var arg in fdef.Signature.OutputArg) | |||
| { | |||
| var iter = fdef.Ret.FirstOrDefault(x => x.Key == arg.Name); | |||
| if(iter.Key != null) | |||
| if (iter.Key != null) | |||
| { | |||
| a_edges.Add(new EdgeSpec(iter.Value, arg.Name)); | |||
| } | |||
| @@ -541,7 +538,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| } | |||
| } | |||
| // Verify edges | |||
| foreach(var edge in e_edges) | |||
| foreach (var edge in e_edges) | |||
| { | |||
| ASSERT_TRUE(a_edges.Contains(edge)); | |||
| } | |||
| @@ -552,14 +549,14 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| // If caller specified all edges, check that we have seen all | |||
| if (is_exact_edges) | |||
| { | |||
| ASSERT_EQ(e_edges.Count + c_edges.Count, a_edges.Count, | |||
| $"Expected edges: {e_edges}, Expected Control edges: {c_edges}, Actual edges: {a_edges}"); | |||
| } | |||
| } | |||
| public void Dispose() | |||
| { | |||
| } | |||
| public struct IOSpec | |||
| @@ -53,7 +53,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| c_api.TF_GraphToGraphDef(graph, buffer.Handle, s.Handle); | |||
| bool ret = TF_GetCode(s) == TF_OK; | |||
| EXPECT_EQ(TF_OK, TF_GetCode(s)); | |||
| if (ret) | |||
| graph_def = GraphDef.Parser.ParseFrom(buffer.DangerousMemoryBlock.Stream()); | |||
| return ret; | |||
| } | |||
| @@ -114,13 +114,13 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| IntPtr[] handles = new IntPtr[2] { IntPtr.Zero, IntPtr.Zero }; | |||
| c_api.TF_AddGradientsWithPrefix(graph_, prefix, outputs, noutputs, inputs, | |||
| ninputs, grad_inputs, s_.Handle, handles); | |||
| var op = new Operation(handles[0]); | |||
| } | |||
| else | |||
| { | |||
| //c_api.TF_AddGradientsWithPrefix(graph_, prefix, outputs, noutputs, inputs, | |||
| //ninputs, null, s_, grad_outputs); | |||
| } | |||
| } | |||
| @@ -232,7 +232,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| return t; | |||
| } | |||
| private Operation MatMul(Graph graph, Status s, Operation l, Operation r, string name, | |||
| bool transpose_a = false, bool transpose_b = false) | |||
| { | |||
| var desc = TF_NewOperation(graph, "MatMul", name); | |||
| @@ -4,7 +4,6 @@ using Tensorflow; | |||
| using Tensorflow.Device; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.UnitTest; | |||
| using Buffer = System.Buffer; | |||
| namespace TensorFlowNET.UnitTest | |||
| { | |||
| @@ -25,7 +25,7 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| lock (Locks.ProcessWide) | |||
| { | |||
| var config = new ConfigProto {InterOpParallelismThreads = 4}; | |||
| var config = new ConfigProto { InterOpParallelismThreads = 4 }; | |||
| session_ = new Session(graph, config, s); | |||
| } | |||
| } | |||
| @@ -78,7 +78,7 @@ namespace TensorFlowNET.UnitTest | |||
| public unsafe void Run(Status s) | |||
| { | |||
| var inputs_ptr = inputs_.ToArray(); | |||
| var input_values_ptr = input_values_.Select(x => (IntPtr) x).ToArray(); | |||
| var input_values_ptr = input_values_.Select(x => (IntPtr)x).ToArray(); | |||
| var outputs_ptr = outputs_.ToArray(); | |||
| var output_values_ptr = output_values_.Select(x => IntPtr.Zero).ToArray(); | |||
| IntPtr[] targets_ptr = new IntPtr[0]; | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| @@ -18,7 +18,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| var data = new float[] { 1.0f, 2.0f, 3.0f, 4.0f }; | |||
| var t = c_api.TF_AllocateTensor(TF_FLOAT, dims, dims.Length, (ulong)data.Length * sizeof(float)); | |||
| tf.memcpy(c_api.TF_TensorData(t), data, data.Length * sizeof(float)); | |||
| using var status = c_api.TF_NewStatus(); | |||
| var th = c_api.TFE_NewTensorHandle(t, status); | |||
| CHECK_EQ(TF_OK, TF_GetCode(status), TF_Message(status)); | |||
| @@ -146,7 +146,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| SafeTensorHandleHandle TestScalarTensorHandle(float value) | |||
| { | |||
| var data = new [] { value }; | |||
| var data = new[] { value }; | |||
| var t = c_api.TF_AllocateTensor(TF_FLOAT, null, 0, sizeof(float)); | |||
| tf.memcpy(TF_TensorData(t), data, TF_TensorByteSize(t)); | |||
| using var status = TF_NewStatus(); | |||
| @@ -1,7 +1,6 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,8 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using System.Linq; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -23,7 +19,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI | |||
| Assert.AreEqual(1, one.consumers().Length); | |||
| Assert.AreEqual("add", neg.op.node_def.Input[0]); | |||
| // update edge | |||
| neg.op._update_input(0, one); | |||
| // c_api.TF_UpdateEdge(graph, new TF_Output(c1.op, 0), new TF_Input(neg.op, 0), tf.Status.Handle); | |||
| @@ -1,9 +1,8 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using Tensorflow; | |||
| using Buffer = Tensorflow.Buffer; | |||
| using static Tensorflow.Binding; | |||
| using Buffer = Tensorflow.Buffer; | |||
| namespace TensorFlowNET.UnitTest.NativeAPI | |||
| { | |||
| @@ -1,8 +1,6 @@ | |||
| using System; | |||
| using System.Diagnostics.CodeAnalysis; | |||
| using System.Runtime.CompilerServices; | |||
| using Tensorflow; | |||
| using Tensorflow.Functions; | |||
| using Tensorflow.Util; | |||
| using Buffer = Tensorflow.Buffer; | |||
| @@ -90,16 +88,19 @@ namespace TensorFlowNET.UnitTest | |||
| if (attr.Value.Type == DataType.DtInt32) | |||
| { | |||
| found_t = true; | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| } else if (attr.Key == "N") | |||
| } | |||
| else if (attr.Key == "N") | |||
| { | |||
| if (attr.Value.I == n) | |||
| { | |||
| found_n = true; | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| @@ -131,11 +132,13 @@ namespace TensorFlowNET.UnitTest | |||
| if (attr.Value.Type == DataType.DtInt32) | |||
| { | |||
| found_dtype = true; | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| } else if (attr.Key == "shape") | |||
| } | |||
| else if (attr.Key == "shape") | |||
| { | |||
| found_shape = true; | |||
| } | |||
| @@ -160,18 +163,21 @@ namespace TensorFlowNET.UnitTest | |||
| if (attr.Value.Type == DataType.DtInt32) | |||
| { | |||
| found_dtype = true; | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| } else if (attr.Key == "value") | |||
| } | |||
| else if (attr.Key == "value") | |||
| { | |||
| if (attr.Value.Tensor != null && | |||
| attr.Value.Tensor.IntVal.Count == 1 && | |||
| attr.Value.Tensor.IntVal[0] == v) | |||
| { | |||
| found_value = true; | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| @@ -1,13 +1,13 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using NumSharp; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using Tensorflow.Util; | |||
| using Buffer = Tensorflow.Buffer; | |||
| using static Tensorflow.Binding; | |||
| using Tensorflow.UnitTest; | |||
| using Buffer = Tensorflow.Buffer; | |||
| namespace TensorFlowNET.UnitTest.Basics | |||
| { | |||
| @@ -41,9 +41,9 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| var b = tf.placeholder(tf.float32); | |||
| var c = tf.add(a, b); | |||
| using(var sess = tf.Session()) | |||
| using (var sess = tf.Session()) | |||
| { | |||
| var o = sess.run(c, | |||
| var o = sess.run(c, | |||
| new FeedItem(a, 3.0f), | |||
| new FeedItem(b, 2.0f)); | |||
| Assert.AreEqual((float)o, 5.0f); | |||
| @@ -123,7 +123,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| Assert.IsTrue(o.array_equal(check)); | |||
| } | |||
| b = tf.cumsum(a, exclusive:true, reverse: true); | |||
| b = tf.cumsum(a, exclusive: true, reverse: true); | |||
| check = np.array(15, 14, 12, 9, 5, 0); | |||
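// Editor's note (assuming a = [0, 1, 2, 3, 4, 5], which matches the expected values):
// exclusive: true leaves each element itself out and reverse: true accumulates from the
// right, so position i holds the sum of everything after it: 15, 14, 12, 9, 5, 0.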
| using (var sess = tf.Session()) | |||
| @@ -136,7 +136,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| [TestMethod] | |||
| public void logicalOpsTest() | |||
| { | |||
| var a = tf.constant(new[] {1f, 2f, 3f, 4f, -4f, -3f, -2f, -1f}); | |||
| var a = tf.constant(new[] { 1f, 2f, 3f, 4f, -4f, -3f, -2f, -1f }); | |||
| var b = tf.less(a, 0f); | |||
| var c = tf.greater(a, 0f); | |||
| var d = tf.cast(tf.logical_and(b, c), tf.int32); | |||
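// Editor's note (context, not part of the diff): no element can be both less than zero
// and greater than zero, so tf.logical_and(b, c) is false everywhere and d becomes a
// tensor of zeros after the cast to int32.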
| @@ -516,7 +516,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| private IEnumerable<int> MultiplyArray(IReadOnlyCollection<int> first, IReadOnlyCollection<int> second) | |||
| { | |||
| if(first.Count != second.Count) | |||
| if (first.Count != second.Count) | |||
| throw new ArgumentException("Arrays should be of equal size!"); | |||
| var firstEnumerator = first.GetEnumerator(); | |||
| @@ -535,7 +535,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| } | |||
| private IEnumerable<float> MultiplyArray(IReadOnlyCollection<float> first, IReadOnlyCollection<float> second) | |||
| { | |||
| if(first.Count != second.Count) | |||
| if (first.Count != second.Count) | |||
| throw new ArgumentException("Arrays should be of equal size!"); | |||
| var firstEnumerator = first.GetEnumerator(); | |||
| @@ -554,7 +554,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| } | |||
| private IEnumerable<double> MultiplyArray(IReadOnlyCollection<double> first, IReadOnlyCollection<double> second) | |||
| { | |||
| if(first.Count != second.Count) | |||
| if (first.Count != second.Count) | |||
| throw new ArgumentException("Arrays should be of equal size!"); | |||
| var firstEnumerator = first.GetEnumerator(); | |||
| @@ -788,7 +788,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray(); | |||
| var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); | |||
| var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1/x).ToArray()).Sum(); | |||
| var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1 / x).ToArray()).Sum(); | |||
| a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||
| b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||
| @@ -837,7 +837,7 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray(); | |||
| var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); | |||
| var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1/x).ToArray()).Sum(); | |||
| var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1 / x).ToArray()).Sum(); | |||
| a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||
| b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||
| @@ -1,9 +1,9 @@ | |||
| using System; | |||
| using System.Collections; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Newtonsoft.Json.Linq; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections; | |||
| using System.Linq; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -37,7 +37,8 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| /*if (g[i] is NDArray && e[i] is NDArray) | |||
| assertItemsEqual((g[i] as NDArray).GetData<object>(), (e[i] as NDArray).GetData<object>()); | |||
| else*/ if (e[i] is ICollection && g[i] is ICollection) | |||
| else*/ | |||
| if (e[i] is ICollection && g[i] is ICollection) | |||
| assertEqual(g[i], e[i]); | |||
| else | |||
| Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}"); | |||
| @@ -183,11 +184,11 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| using (var sess = tf.Session()) | |||
| { | |||
| var ndarray=tensor.eval(sess); | |||
| var ndarray = tensor.eval(sess); | |||
| if (typeof(T) == typeof(double)) | |||
| { | |||
| double x = ndarray; | |||
| result=x; | |||
| result = x; | |||
| } | |||
| else if (typeof(T) == typeof(int)) | |||
| { | |||
| @@ -1,8 +1,5 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow; | |||
| using static Tensorflow.Binding; | |||
| @@ -27,7 +24,7 @@ namespace TensorFlowNET.UnitTest.Training | |||
| Func<Tensor, Tensor> model = (x) => W * x + b; | |||
| // Define the loss function | |||
| Func<Tensor, Tensor, Tensor> loss = (target_y, predicted_y) | |||
| Func<Tensor, Tensor, Tensor> loss = (target_y, predicted_y) | |||
| => tf.reduce_mean(tf.square(target_y - predicted_y)); | |||
| int NUM_EXAMPLES = 1000; | |||
| @@ -54,7 +51,7 @@ namespace TensorFlowNET.UnitTest.Training | |||
| }; | |||
| var epochs = range(10); | |||
| foreach(var epoch in epochs) | |||
| foreach (var epoch in epochs) | |||
| { | |||
| var current_loss = train(inputs, outputs, 0.1f); | |||
| print($"Epoch {epoch}: W={(float)W.numpy()} b={(float)b.numpy()}, loss={(float)current_loss.numpy()}"); | |||
| @@ -1,7 +1,6 @@ | |||
| using System; | |||
| using System.Diagnostics; | |||
| using System.Threading; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| namespace TensorFlowNET.UnitTest | |||
| { | |||
| @@ -46,7 +45,7 @@ namespace TensorFlowNET.UnitTest | |||
| if (workload == null) throw new ArgumentNullException(nameof(workload)); | |||
| if (postRun == null) throw new ArgumentNullException(nameof(postRun)); | |||
| if (threadCount <= 0) throw new ArgumentOutOfRangeException(nameof(threadCount)); | |||
| new MultiThreadedUnitTestExecuter(threadCount) {PostRun = postRun}.Run(workload); | |||
| new MultiThreadedUnitTestExecuter(threadCount) { PostRun = postRun }.Run(workload); | |||
| } | |||
| #endregion | |||
| @@ -81,12 +80,14 @@ namespace TensorFlowNET.UnitTest | |||
| try | |||
| { | |||
| workloads[0](0); | |||
| } catch (Exception e) | |||
| } | |||
| catch (Exception e) | |||
| { | |||
| if (Debugger.IsAttached) | |||
| throw; | |||
| ex = e; | |||
| } finally | |||
| } | |||
| finally | |||
| { | |||
| done_barrier2.Release(1); | |||
| } | |||
| @@ -111,12 +112,14 @@ namespace TensorFlowNET.UnitTest | |||
| try | |||
| { | |||
| core(threadid); | |||
| } catch (Exception e) | |||
| } | |||
| catch (Exception e) | |||
| { | |||
| if (Debugger.IsAttached) | |||
| throw; | |||
| return e; | |||
| } finally | |||
| } | |||
| finally | |||
| { | |||
| done_barrier2.Release(1); | |||
| } | |||
| @@ -133,7 +136,8 @@ namespace TensorFlowNET.UnitTest | |||
| var i_local = i; | |||
| Threads[i] = new Thread(() => Exceptions[i_local] = ThreadCore(workload, i_local)); | |||
| } | |||
| } else | |||
| } | |||
| else | |||
| { | |||
| for (int i = 0; i < ThreadCount; i++) | |||
| { | |||
| @@ -4,7 +4,6 @@ | |||
| namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| { | |||
| using System; | |||
| using System.Collections.Generic; | |||
| //using System.Diagnostics; | |||
| //using System.Diagnostics.CodeAnalysis; | |||
| using System.Globalization; | |||
| @@ -239,8 +238,9 @@ namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| //Debug.Assert(this.target != null, "target should not be null."); | |||
| if (typeof(PrivateObject) == obj?.GetType()) | |||
| { | |||
| return this.target.Equals(((PrivateObject) obj).target); | |||
| } else | |||
| return this.target.Equals(((PrivateObject)obj).target); | |||
| } | |||
| else | |||
| { | |||
| return false; | |||
| } | |||
| @@ -571,7 +571,7 @@ namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| public void SetFieldOrProperty(string name, BindingFlags bindingFlags, object value) | |||
| { | |||
| Helper.CheckParameterNotNull(name, "name", string.Empty); | |||
| this.InvokeHelper(name, BindingFlags.SetField | BindingFlags.SetProperty | bindingFlags, new object[] {value}, CultureInfo.InvariantCulture); | |||
| this.InvokeHelper(name, BindingFlags.SetField | BindingFlags.SetProperty | bindingFlags, new object[] { value }, CultureInfo.InvariantCulture); | |||
| } | |||
| ///// <summary> | |||
| @@ -745,7 +745,8 @@ namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| try | |||
| { | |||
| return this.originalType.InvokeMember(name, bindingFlags, null, this.target, args, culture); | |||
| } catch (TargetInvocationException e) | |||
| } | |||
| catch (TargetInvocationException e) | |||
| { | |||
| //Debug.Assert(e.InnerException != null, "Inner exception should not be null."); | |||
| if (e.InnerException != null) | |||
| @@ -219,7 +219,7 @@ namespace System | |||
| return true; | |||
| } | |||
| } | |||
| catch(MissingMethodException) | |||
| catch (MissingMethodException) | |||
| { | |||
| // When getter only property name is given, the property is found but fails to set. | |||
| return false; | |||
| @@ -375,7 +375,7 @@ namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| public void SetStaticFieldOrProperty(string name, BindingFlags bindingFlags, object value) | |||
| { | |||
| Helper.CheckParameterNotNull(name, "name", string.Empty); | |||
| this.InvokeHelperStatic(name, BindingFlags.SetField | BindingFlags.SetProperty | bindingFlags | BindingFlags.Static, new[] {value}, CultureInfo.InvariantCulture); | |||
| this.InvokeHelperStatic(name, BindingFlags.SetField | BindingFlags.SetProperty | bindingFlags | BindingFlags.Static, new[] { value }, CultureInfo.InvariantCulture); | |||
| } | |||
| ///// <summary> | |||
| @@ -509,7 +509,8 @@ namespace Microsoft.VisualStudio.TestTools.UnitTesting | |||
| try | |||
| { | |||
| return this.type.InvokeMember(name, bindingFlags | BindToEveryThing | BindingFlags.Static, null, null, args, culture); | |||
| } catch (TargetInvocationException e) | |||
| } | |||
| catch (TargetInvocationException e) | |||
| { | |||
| //Debug.Assert(e.InnerException != null, "Inner Exception should not be null."); | |||
| if (e.InnerException != null) | |||
| @@ -1,7 +1,4 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.IO; | |||
| using System.Text; | |||
| using System.IO; | |||
| namespace TensorFlowNET.UnitTest | |||
| { | |||
| @@ -11,6 +8,6 @@ namespace TensorFlowNET.UnitTest | |||
| { | |||
| var dir = Path.Combine(Directory.GetCurrentDirectory(), "..", "..", "..", "..", "..", "data"); | |||
| return Path.GetFullPath(Path.Combine(dir, fileName)); | |||
| } | |||
| } | |||
| } | |||
| } | |||
| @@ -14,10 +14,10 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
| [TestMethod] | |||
| public void testShape() | |||
| { | |||
| var tensor = constant_op.constant(new[]{1.0, 2.0}); | |||
| self.assertEquals(new int[] {2}, tensor.shape); | |||
| self.assertEquals(new int[] {2}, | |||
| control_flow_ops.with_dependencies(new[] {constant_op.constant(1.0).op}, tensor).shape); | |||
| var tensor = constant_op.constant(new[] { 1.0, 2.0 }); | |||
| self.assertEquals(new int[] { 2 }, tensor.shape); | |||
| self.assertEquals(new int[] { 2 }, | |||
| control_flow_ops.with_dependencies(new[] { constant_op.constant(1.0).op }, tensor).shape); | |||
| } | |||
| } | |||
| @@ -1,5 +1,4 @@ | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
| { | |||
| @@ -16,7 +15,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
| { | |||
| //var embedding_matrix = variable_scope.get_variable( | |||
| //"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); | |||
| //"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); | |||
| /* | |||
| Tensor cond(Tensor it, Tensor _) | |||
| @@ -1,5 +1,5 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -1,6 +1,6 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using NumSharp; | |||
| using System; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -17,24 +17,24 @@ namespace TensorFlowNET.UnitTest.functional_ops_test | |||
| public void ScanForward() | |||
| { | |||
| var fn = new Func<Tensor, Tensor, Tensor>((a, x) => tf.add(a, x)); | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6)); | |||
| var scan = functional_ops.scan(fn, input); | |||
| sess.run(scan, (input, np.array(1,2,3,4,5,6))).Should().Be(np.array(1,3,6,10,15,21)); | |||
| sess.run(scan, (input, np.array(1, 2, 3, 4, 5, 6))).Should().Be(np.array(1, 3, 6, 10, 15, 21)); | |||
| } | |||
| [TestMethod, Ignore("need UpdateEdge API")] | |||
| public void ScanReverse() | |||
| { | |||
| var fn = new Func<Tensor, Tensor, Tensor>((a, x) => tf.add(a, x)); | |||
| var sess = tf.Session().as_default(); | |||
| var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6)); | |||
| var scan = functional_ops.scan(fn, input, reverse:true); | |||
| sess.run(scan, (input, np.array(1,2,3,4,5,6))).Should().Be(np.array(21,20,18,15,11,6)); | |||
| var scan = functional_ops.scan(fn, input, reverse: true); | |||
| sess.run(scan, (input, np.array(1, 2, 3, 4, 5, 6))).Should().Be(np.array(21, 20, 18, 15, 11, 6)); | |||
| } | |||
| } | |||
| } | |||
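// Editor's note (context, not part of the diff): functional_ops.scan with tf.add is a
// running sum. Forward it accumulates left to right, 1,2,3,4,5,6 -> 1,3,6,10,15,21;
// with reverse: true it accumulates right to left, giving 21,20,18,15,11,6.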
| @@ -1,9 +1,9 @@ | |||
| using System; | |||
| using System.Collections; | |||
| using System.Collections.Generic; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Newtonsoft.Json.Linq; | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections; | |||
| using System.Collections.Generic; | |||
| using Tensorflow; | |||
| using Tensorflow.UnitTest; | |||
| using Tensorflow.Util; | |||
| @@ -393,11 +393,11 @@ namespace TensorFlowNET.UnitTest.nest_test | |||
| var s = JArray.FromObject(structure1_plus1).ToString(); | |||
| Console.WriteLine(s); | |||
| // nest.assert_same_structure(structure1, structure1_plus1) | |||
| self.assertAllEqual( nest.flatten(structure1_plus1), new object[] { 2, 3, 4, 5, 6, 7 }); | |||
| self.assertAllEqual(nest.flatten(structure1_plus1), new object[] { 2, 3, 4, 5, 6, 7 }); | |||
| self.assertAllEqual(nest.flatten(structure1_strings), new object[] { "1", "2", "3", "4", "5", "6" }); | |||
| var structure1_plus_structure2 = nest.map_structure(x => (int)(x[0]) + (int)(x[1]), structure1, structure2); | |||
| self.assertEqual( | |||
| new object[] { new object[] { new object[] { 1 + 7, 2 + 8}, 3 + 9}, 4 + 10, new object[] { 5 + 11, 6 + 12}}, | |||
| new object[] { new object[] { new object[] { 1 + 7, 2 + 8 }, 3 + 9 }, 4 + 10, new object[] { 5 + 11, 6 + 12 } }, | |||
| structure1_plus_structure2); | |||
| // self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4)) | |||
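// Editor's note (context, not part of the diff): nest.map_structure applies the delegate
// element-wise across structures with identical nesting, so the assertion above pairs
// flatten(structure1) = { 1..6 } with flatten(structure2) = { 7..12 } and checks the
// nested sums 8, 10, 12, 14, 16, 18.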
| @@ -1,8 +1,6 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.UnitTest; | |||
| using static Tensorflow.Binding; | |||
| @@ -193,25 +191,25 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| }); | |||
| // Note: the helper's signature is assertItemsEqual(given, expected); the expected and given arguments are intentionally swapped in the calls below | |||
| assertItemsEqual(new[] {a_1.op}, b_1.op.control_inputs); | |||
| assertItemsEqual(new[] {a_1.op, a_2.op}, b_2.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op, a_2.op}, b_3.op.control_inputs); | |||
| assertItemsEqual(new[] {a_1.op, a_2.op}, b_4.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op }, b_1.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op, a_2.op }, b_2.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op, a_2.op }, b_3.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op, a_2.op }, b_4.op.control_inputs); | |||
| assertItemsEqual(new object[0], c_1.op.control_inputs); | |||
| assertItemsEqual(new[] {a_2.op}, c_2.op.control_inputs); | |||
| assertItemsEqual(new[] {a_2.op, a_3.op}, c_3.op.control_inputs); | |||
| assertItemsEqual(new[] {a_2.op, a_3.op, a_4.op}, c_4.op.control_inputs); | |||
| assertItemsEqual(new[] { a_2.op }, c_2.op.control_inputs); | |||
| assertItemsEqual(new[] { a_2.op, a_3.op }, c_3.op.control_inputs); | |||
| assertItemsEqual(new[] { a_2.op, a_3.op, a_4.op }, c_4.op.control_inputs); | |||
| assertItemsEqual(new object[0], d_1.op.control_inputs); | |||
| assertItemsEqual(new object[0], d_2.op.control_inputs); | |||
| assertItemsEqual(new object[0], d_3.op.control_inputs); | |||
| assertItemsEqual(new object[0], d_4.op.control_inputs); | |||
| assertItemsEqual(new[] {a_1.op}, e_1.op.control_inputs); | |||
| assertItemsEqual(new[] {a_2.op}, e_2.op.control_inputs); | |||
| assertItemsEqual(new[] {a_3.op}, e_3.op.control_inputs); | |||
| assertItemsEqual(new[] {a_4.op}, e_4.op.control_inputs); | |||
| assertItemsEqual(new[] { a_1.op }, e_1.op.control_inputs); | |||
| assertItemsEqual(new[] { a_2.op }, e_2.op.control_inputs); | |||
| assertItemsEqual(new[] { a_3.op }, e_3.op.control_inputs); | |||
| assertItemsEqual(new[] { a_4.op }, e_4.op.control_inputs); | |||
| } | |||
| [Ignore("Don't know how to create an operation with two outputs")] | |||
| @@ -1,6 +1,6 @@ | |||
| using System; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using System; | |||
| using System.Linq; | |||
| using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
| using Tensorflow; | |||
| using Tensorflow.Operations; | |||
| using Tensorflow.UnitTest; | |||
| @@ -28,14 +28,14 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| { | |||
| using (var g = tf.Graph().as_default()) | |||
| { | |||
| var x = constant_op.constant(new[,] {{1, 2, 3}, {4, 5, 6}}); | |||
| var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] {x}, new Operation[0]); | |||
| var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); | |||
| var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); | |||
| var op = g._create_op_from_tf_operation(c_op); | |||
| Assert.AreEqual("myop", op.name); | |||
| Assert.AreEqual("Identity", op.type); | |||
| Assert.AreEqual(1, len(op.outputs)); | |||
| assertItemsEqual(new[] {2, 3}, op.outputs[0].shape); | |||
| assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); | |||
| } | |||
| } | |||
| @@ -101,7 +101,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| public void TestWhileLoop() | |||
| { | |||
| var graph = tf.Graph().as_default(); | |||
| Operation x=null; | |||
| Operation x = null; | |||
| x = constant_op.constant(42); | |||
| var body = new Func<int, int>(i => | |||
| { | |||
| @@ -120,7 +120,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| self.assertEqual(op.outputs.Length, 0); | |||
| var op_input = op.inputs[0].op; | |||
| self.assertEqual(op_input.type, "Enter"); | |||
| self.assertItemsEqual(op_input.inputs.OfType<Operation>().ToArray(), new[] {x}); | |||
| self.assertItemsEqual(op_input.inputs.OfType<Operation>().ToArray(), new[] { x }); | |||
| self.assertEqual(op.graph, graph); | |||
| self.assertIsNotNone(op._get_control_flow_context()); | |||
| self.assertEqual(((ControlFlowContext)op._get_control_flow_context()).Name, "myloop/while_context"); | |||
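// Editor's note (context, not part of the diff): the assertions above check the graph
// structure that the while loop builds: the inspected op has no outputs, its first input
// arrives through an "Enter" node fed by x, it belongs to the same graph, and it is
// registered in the "myloop/while_context" control flow context.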
| @@ -22,7 +22,8 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| ops.reset_default_graph(); | |||
| } | |||
| private void _AssertDefault(Graph expected) { | |||
| private void _AssertDefault(Graph expected) | |||
| { | |||
| Assert.AreSame(ops.get_default_graph(), expected); | |||
| } | |||
| @@ -31,13 +32,13 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
| [TestMethod] | |||
| public void testResetDefaultGraphNesting() | |||
| { | |||
| /* | |||
| def testResetDefaultGraphNesting(self): | |||
| g0 = ops.Graph() | |||
| with self.assertRaises(AssertionError): | |||
| with g0.as_default(): | |||
| ops.reset_default_graph() | |||
| */ | |||
| /* | |||
| def testResetDefaultGraphNesting(self): | |||
| g0 = ops.Graph() | |||
| with self.assertRaises(AssertionError): | |||
| with g0.as_default(): | |||
| ops.reset_default_graph() | |||
| */ | |||
| } | |||
| [Ignore("Todo: Port")] | |||