From c12b5b9d58045278af0e8943e0964fcea5a46fd8 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Thu, 1 Aug 2019 18:43:35 -0500 Subject: [PATCH 01/13] Removed shared Status instance in Operation. --- .../Operations/Operation.Input.cs | 13 +++++++++- .../Operations/Operation.Output.cs | 13 +++++++++- .../Operations/Operation.cs | 25 ++++++++++++------- .../Tensors/c_api.tensor.cs | 2 +- test/TensorFlowNET.UnitTest/TensorTest.cs | 6 ++--- 5 files changed, 44 insertions(+), 15 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs index 83e7567f..6d6403c9 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs @@ -26,7 +26,18 @@ namespace Tensorflow { public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); - public int InputListLength(string name) => c_api.TF_OperationInputListLength(_handle, name, status); + + public int InputListLength(string name) + { + int num = 0; + using(var status = new Status()) + { + num = c_api.TF_OperationInputListLength(_handle, name, status); + status.Check(true); + } + return num; + } + public int NumInputs => c_api.TF_OperationNumInputs(_handle); private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray(); diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs index 41f4a332..24348322 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs @@ -24,7 +24,18 @@ namespace Tensorflow { public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(new TF_Output(_handle, index)); - public int OutputListLength(string name) => c_api.TF_OperationOutputListLength(_handle, name, status); + + public int OutputListLength(string name) + { + int num = 0; + using (var status = new Status()) + { + num = c_api.TF_OperationOutputListLength(_handle, name, status); + status.Check(true); + } + + return num; + } private Tensor[] _outputs; public Tensor[] outputs => _outputs; diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index b673380b..8c4ce606 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -54,7 +54,6 @@ namespace Tensorflow public Operation op => this; public TF_DataType dtype => TF_DataType.DtInvalid; - private Status status = new Status(); public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle)); @@ -96,10 +95,14 @@ namespace Tensorflow _operDesc = c_api.TF_NewOperation(g, opType, oper_name); c_api.TF_SetAttrType(_operDesc, "dtype", TF_DataType.TF_INT32); - _handle = c_api.TF_FinishOperation(_operDesc, status); - - // Dict mapping op name to file and line information for op colocation - // context managers. + using (var status = new Status()) + { + _handle = c_api.TF_FinishOperation(_operDesc, status); + status.Check(true); + } + + // Dict mapping op name to file and line information for op colocation + // context managers. 
_control_flow_context = graph._get_control_flow_context(); } @@ -220,6 +223,7 @@ namespace Tensorflow { AttrValue x = null; + using (var status = new Status()) using (var buf = new Buffer()) { c_api.TF_OperationGetAttrValueProto(_handle, name, buf, status); @@ -274,12 +278,15 @@ namespace Tensorflow var output = tensor._as_tf_output(); // Reset cached inputs. - _inputs = null; + _inputs = null; // after the c_api call next time _inputs is accessed // the updated inputs are reloaded from the c_api - c_api.UpdateEdge(_graph, output, input, status); - //var updated_inputs = inputs; - status.Check(); + using (var status = new Status()) + { + c_api.UpdateEdge(_graph, output, input, status); + //var updated_inputs = inputs; + status.Check(); + } } private void _assert_same_graph(Tensor tensor) diff --git a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs index fd240ee7..324de913 100644 --- a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs @@ -33,7 +33,7 @@ namespace Tensorflow public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); [DllImport(TensorFlowLibName)] - public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len); + public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, ulong len); /// /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. diff --git a/test/TensorFlowNET.UnitTest/TensorTest.cs b/test/TensorFlowNET.UnitTest/TensorTest.cs index 9f4fff39..6008a809 100644 --- a/test/TensorFlowNET.UnitTest/TensorTest.cs +++ b/test/TensorFlowNET.UnitTest/TensorTest.cs @@ -77,14 +77,14 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void AllocateTensor() { - /*ulong num_bytes = 6 * sizeof(float); + ulong num_bytes = 6 * sizeof(float); long[] dims = { 2, 3 }; Tensor t = c_api.TF_AllocateTensor(TF_DataType.TF_FLOAT, dims, 2, num_bytes); EXPECT_EQ(TF_DataType.TF_FLOAT, t.dtype); EXPECT_EQ(2, t.NDims); - Assert.IsTrue(Enumerable.SequenceEqual(dims, t.shape)); + EXPECT_EQ((int)dims[0], t.shape[0]); EXPECT_EQ(num_bytes, t.bytesize); - t.Dispose();*/ + t.Dispose(); } From 9d2267f83aa9e0d8f35f23a3f4b710fd02194b21 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Thu, 1 Aug 2019 18:48:34 -0500 Subject: [PATCH 02/13] add overload for TF_AllocateTensor. --- src/TensorFlowNET.Core/Tensors/c_api.tensor.cs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs index 324de913..6b20b34f 100644 --- a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs @@ -35,6 +35,9 @@ namespace Tensorflow [DllImport(TensorFlowLibName)] public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, ulong len); + [DllImport(TensorFlowLibName)] + public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len); + /// /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. /// From 9502395de777e299f6af6dc8f2323f6b77fbb594 Mon Sep 17 00:00:00 2001 From: Antonio Cifonelli Date: Fri, 2 Aug 2019 13:42:28 +0200 Subject: [PATCH 03/13] Adding `logical_and` operator (#338) Relative unit test in `OperationTest`. 
--- src/TensorFlowNET.Core/APIs/tf.math.cs | 3 +++ .../Operations/gen_math_ops.cs | 7 +++++++ test/TensorFlowNET.UnitTest/OperationsTest.cs | 16 ++++++++++++++++ 3 files changed, 26 insertions(+)

diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs
index 5586840c..fb65d31b 100644
--- a/src/TensorFlowNET.Core/APIs/tf.math.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.math.cs
@@ -189,6 +189,9 @@ namespace Tensorflow
 public static Tensor log1p(Tensor x, string name = null)
 => gen_math_ops.log1p(x, name);
+ public static Tensor logical_and(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.logical_and(x, y, name);
+
 ///
 /// Clips tensor values to a specified min and max.
 ///
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index 8ec7e253..a8b9ac49 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -350,6 +350,13 @@ namespace Tensorflow
 return _op.outputs[0];
 }
+ public static Tensor logical_and(Tensor x, Tensor y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
 public static Tensor squared_difference(Tensor x, Tensor y, string name = null)
 {
 var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
diff --git a/test/TensorFlowNET.UnitTest/OperationsTest.cs b/test/TensorFlowNET.UnitTest/OperationsTest.cs
index a0a3b5e4..10046f0c 100644
--- a/test/TensorFlowNET.UnitTest/OperationsTest.cs
+++ b/test/TensorFlowNET.UnitTest/OperationsTest.cs
@@ -130,6 +130,22 @@ namespace TensorFlowNET.UnitTest
 }
 }
+ [TestMethod]
+ public void logicalAndTest()
+ {
+ var a = tf.constant(new[] {1f, 2f, 3f, 4f, -4f, -3f, -2f, -1f});
+ var b = tf.less(a, 0f);
+ var c = tf.greater(a, 0f);
+ var d = tf.cast(tf.logical_and(b, c), tf.int32);
+ var check = np.array(new[] { 0, 0, 0, 0, 0, 0, 0, 0 });
+
+ using (var sess = tf.Session())
+ {
+ var o = sess.run(d);
+ Assert.IsTrue(o.array_equal(check));
+ }
+ }
+
 [TestMethod]
 public void addOpTests()
 {

From fa2ae8c3b417732f8155e71df85c4e5bfeee3d38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B9=85=E6=B0=B8?= Date: Fri, 2 Aug 2019 22:23:48 +0800 Subject: [PATCH 04/13] Added a feature to avoid repeatedly downloading data that has already been fetched. More code could be added later to verify the file's hash and to resume interrupted downloads. --- src/TensorFlowHub/Utils.cs | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/src/TensorFlowHub/Utils.cs b/src/TensorFlowHub/Utils.cs
index 10aaf958..72ee9430 100644
--- a/src/TensorFlowHub/Utils.cs
+++ b/src/TensorFlowHub/Utils.cs
@@ -25,13 +25,25 @@ namespace Tensorflow.Hub
 if (!Path.IsPathRooted(dirSaveTo))
 dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo);
- if (!Directory.Exists(dirSaveTo))
- Directory.CreateDirectory(dirSaveTo);
-
- using (var wc = new WebClient())
+ var fileSaveTo = Path.Combine(dirSaveTo, fileName);
+
+ if (File.Exists(fileSaveTo))
 {
- await wc.DownloadFileTaskAsync(url, Path.Combine(dirSaveTo, fileName));
+ //TODO: maybe check the file's hash and a "download.info" file to verify or resume the download ...
+ Console.WriteLine($"{fileSaveTo} already exists."); } + else + { + if (!Directory.Exists(dirSaveTo)) + Directory.CreateDirectory(dirSaveTo); + + using (var wc = new WebClient()) + { + await wc.DownloadFileTaskAsync(url, fileSaveTo); + } + + } + } public static async Task UnzipAsync(this IModelLoader modelLoader, string zipFile, string saveTo) @@ -42,7 +54,7 @@ namespace Tensorflow.Hub if (!Directory.Exists(saveTo)) Directory.CreateDirectory(saveTo); - + if (!Path.IsPathRooted(zipFile)) zipFile = Path.Combine(AppContext.BaseDirectory, zipFile); @@ -78,7 +90,7 @@ namespace Tensorflow.Hub var cts = new CancellationTokenSource(); var showProgressTask = ShowProgressInConsole(cts); - + try { await task; @@ -86,7 +98,7 @@ namespace Tensorflow.Hub finally { cts.Cancel(); - } + } } private static async Task ShowProgressInConsole(CancellationTokenSource cts) From b06b09932ebc09e0dfcc85d544acaa0b11a4d48b Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Fri, 2 Aug 2019 06:41:54 -0500 Subject: [PATCH 05/13] add DisposableObject as base class. --- src/TensorFlowNET.Core/Buffers/Buffer.cs | 12 ++- src/TensorFlowNET.Core/DisposableObject.cs | 88 +++++++++++++++++++ src/TensorFlowNET.Core/Graphs/Graph.Export.cs | 6 +- .../Sessions/BaseSession.cs | 28 ++++-- src/TensorFlowNET.Core/Sessions/Session.cs | 32 +------ .../Sessions/SessionOptions.cs | 33 ++++--- src/TensorFlowNET.Core/Status/Status.cs | 23 +---- src/TensorFlowNET.Core/Tensors/Tensor.cs | 26 +----- test/TensorFlowNET.UnitTest/ConstantTest.cs | 8 +- test/TensorFlowNET.UnitTest/TensorTest.cs | 2 +- test/TensorFlowNET.UnitTest/VariableTest.cs | 1 + test/TensorFlowNET.UnitTest/c_test_util.cs | 15 ++-- 12 files changed, 153 insertions(+), 121 deletions(-) create mode 100644 src/TensorFlowNET.Core/DisposableObject.cs diff --git a/src/TensorFlowNET.Core/Buffers/Buffer.cs b/src/TensorFlowNET.Core/Buffers/Buffer.cs index 378c7c85..753ad508 100644 --- a/src/TensorFlowNET.Core/Buffers/Buffer.cs +++ b/src/TensorFlowNET.Core/Buffers/Buffer.cs @@ -19,10 +19,8 @@ using System.Runtime.InteropServices; namespace Tensorflow { - public class Buffer : IDisposable + public class Buffer : DisposableObject { - private IntPtr _handle; - private TF_Buffer buffer => Marshal.PtrToStructure(_handle); public byte[] Data @@ -54,6 +52,8 @@ namespace Tensorflow Marshal.Copy(data, 0, dst, data.Length); _handle = c_api.TF_NewBufferFromString(dst, (ulong)data.Length); + + Marshal.FreeHGlobal(dst); } public static implicit operator IntPtr(Buffer buffer) @@ -66,9 +66,7 @@ namespace Tensorflow return buffer.Data; } - public void Dispose() - { - c_api.TF_DeleteBuffer(_handle); - } + protected override void DisposeUnManagedState() + => c_api.TF_DeleteBuffer(_handle); } } diff --git a/src/TensorFlowNET.Core/DisposableObject.cs b/src/TensorFlowNET.Core/DisposableObject.cs new file mode 100644 index 00000000..b59e6aa0 --- /dev/null +++ b/src/TensorFlowNET.Core/DisposableObject.cs @@ -0,0 +1,88 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow +{ + /// + /// Abstract class for disposable object allocated in unmanaged runtime. + /// + public abstract class DisposableObject : IDisposable + { + protected IntPtr _handle; + + protected DisposableObject() { } + + public DisposableObject(IntPtr handle) + { + _handle = handle; + } + + private bool disposedValue = false; // To detect redundant calls + + protected virtual void DisposeManagedState() + { + } + + protected abstract void DisposeUnManagedState(); + + protected virtual void Dispose(bool disposing) + { + if (!disposedValue) + { + if (disposing) + { + // dispose managed state (managed objects). + DisposeManagedState(); + } + + // free unmanaged resources (unmanaged objects) and override a finalizer below. + /*IntPtr h = IntPtr.Zero; + lock (this) + { + h = _handle; + _handle = IntPtr.Zero; + }*/ + if (_handle != IntPtr.Zero) + DisposeUnManagedState(); + + // set large fields to null. + _handle = IntPtr.Zero; + + disposedValue = true; + } + } + + // override a finalizer only if Dispose(bool disposing) above has code to free unmanaged resources. + ~DisposableObject() + { + // Do not change this code. Put cleanup code in Dispose(bool disposing) above. + Dispose(false); + } + + // This code added to correctly implement the disposable pattern. + public void Dispose() + { + // Do not change this code. Put cleanup code in Dispose(bool disposing) above. + Dispose(true); + // uncomment the following line if the finalizer is overridden above. + GC.SuppressFinalize(this); + } + } +} diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs index 60657038..17828c73 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs @@ -22,7 +22,7 @@ namespace Tensorflow { var buffer = new Buffer(); c_api.TF_GraphToGraphDef(_handle, buffer, s); - s.Check(); + s.Check(true); // var def = GraphDef.Parser.ParseFrom(buffer); // buffer.Dispose(); @@ -33,7 +33,9 @@ namespace Tensorflow { var status = new Status(); var buffer = ToGraphDef(status); - status.Check(); + status.Check(true); + status.Dispose(); + var def = GraphDef.Parser.ParseFrom(buffer); buffer.Dispose(); diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index 47a891d6..1b807a97 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -24,7 +24,7 @@ using System.Text; namespace Tensorflow { - public class BaseSession + public class BaseSession : DisposableObject { protected Graph _graph; protected bool _opened; @@ -42,17 +42,13 @@ namespace Tensorflow SessionOptions newOpts = null; if (opts == null) - newOpts = c_api.TF_NewSessionOptions(); + newOpts = new SessionOptions(); - var Status = new Status(); - - _session = c_api.TF_NewSession(_graph, opts ?? newOpts, Status); + var status = new Status(); - // dispose newOpts - if (opts == null) - c_api.TF_DeleteSessionOptions(newOpts); + _session = c_api.TF_NewSession(_graph, opts ?? 
newOpts, status); - Status.Check(true); + status.Check(true); } public virtual NDArray run(object fetches, params FeedItem[] feed_dict) @@ -324,5 +320,19 @@ namespace Tensorflow { } + + public void close() + { + Dispose(); + } + + protected override void DisposeUnManagedState() + { + using (var status = new Status()) + { + c_api.TF_DeleteSession(_handle, status); + status.Check(true); + } + } } } diff --git a/src/TensorFlowNET.Core/Sessions/Session.cs b/src/TensorFlowNET.Core/Sessions/Session.cs index c85e0598..36797ec7 100644 --- a/src/TensorFlowNET.Core/Sessions/Session.cs +++ b/src/TensorFlowNET.Core/Sessions/Session.cs @@ -50,7 +50,7 @@ namespace Tensorflow { var graph = c_api.TF_NewGraph(); var status = new Status(); - var opt = c_api.TF_NewSessionOptions(); + var opt = new SessionOptions(); var tags = new string[] { "serve" }; var buffer = new TF_Buffer(); @@ -68,7 +68,7 @@ namespace Tensorflow // var data = new byte[buffer.length]; // Marshal.Copy(buffer.data, data, 0, (int)buffer.length); // var meta_graph = MetaGraphDef.Parser.ParseFrom(data);*/ - status.Check(); + status.Check(true); return new Session(sess, g: new Graph(graph).as_default()); } @@ -76,34 +76,6 @@ namespace Tensorflow public static implicit operator IntPtr(Session session) => session._session; public static implicit operator Session(IntPtr handle) => new Session(handle); - public void close() - { - Dispose(); - } - - public void Dispose() - { - IntPtr h = IntPtr.Zero; - lock (this) - { - h = _session; - _session = IntPtr.Zero; - } - if (h != IntPtr.Zero) - { - var status = new Status(); - c_api.TF_DeleteSession(h, status); - status.Check(true); - } - - GC.SuppressFinalize(this); - } - - ~Session() - { - Dispose(); - } - public void __enter__() { diff --git a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs index 361a48d6..21604495 100644 --- a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs +++ b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs @@ -20,37 +20,34 @@ using System.Runtime.InteropServices; namespace Tensorflow { - public class SessionOptions : IDisposable + public class SessionOptions : DisposableObject { - private IntPtr _handle; - private Status _status; - - public unsafe SessionOptions() + public SessionOptions() { - var opts = c_api.TF_NewSessionOptions(); - _handle = opts; - _status = new Status(); + _handle = c_api.TF_NewSessionOptions(); } - public unsafe SessionOptions(IntPtr handle) + public SessionOptions(IntPtr handle) { _handle = handle; } - public void Dispose() - { - c_api.TF_DeleteSessionOptions(_handle); - _status.Dispose(); - } + protected override void DisposeUnManagedState() + => c_api.TF_DeleteSessionOptions(_handle); - public Status SetConfig(ConfigProto config) + public void SetConfig(ConfigProto config) { var bytes = config.ToByteArray(); var proto = Marshal.AllocHGlobal(bytes.Length); Marshal.Copy(bytes, 0, proto, bytes.Length); - c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, _status); - _status.Check(false); - return _status; + + using (var status = new Status()) + { + c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, status); + status.Check(false); + } + + Marshal.FreeHGlobal(proto); } public static implicit operator IntPtr(SessionOptions opts) => opts._handle; diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs index d39a73c7..fde0bcee 100644 --- a/src/TensorFlowNET.Core/Status/Status.cs +++ b/src/TensorFlowNET.Core/Status/Status.cs @@ -22,10 +22,8 @@ namespace 
Tensorflow /// TF_Status holds error information. It either has an OK code, or /// else an error code with an associated error message. /// - public class Status : IDisposable + public class Status : DisposableObject { - protected IntPtr _handle; - /// /// Error message /// @@ -67,22 +65,7 @@ namespace Tensorflow return status._handle; } - public void Dispose() - { - IntPtr h = IntPtr.Zero; - lock (this) - { - h = _handle; - _handle = IntPtr.Zero; - } - if (h != IntPtr.Zero) - c_api.TF_DeleteStatus(h); - GC.SuppressFinalize(this); - } - - ~Status() - { - Dispose(); - } + protected override void DisposeUnManagedState() + => c_api.TF_DeleteStatus(_handle); } } diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index 6d1a0783..5faced62 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -29,10 +29,8 @@ namespace Tensorflow /// A tensor is a generalization of vectors and matrices to potentially higher dimensions. /// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes. /// - public partial class Tensor : IDisposable, ITensorOrOperation, _TensorLike + public partial class Tensor : DisposableObject, ITensorOrOperation, _TensorLike { - private IntPtr _handle; - private int _id; private Operation _op; @@ -394,26 +392,8 @@ namespace Tensorflow return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; } - public void Dispose() - { - IntPtr h = IntPtr.Zero; - lock (this) - { - h = _handle; - _handle = IntPtr.Zero; - } - if (h != IntPtr.Zero) - c_api.TF_DeleteTensor(h); - GC.SuppressFinalize(this); - } - - /// - /// Dispose the tensor when it gets garbage collected - /// - ~Tensor() - { - Dispose(); - } + protected override void DisposeUnManagedState() + => c_api.TF_DeleteTensor(_handle); public bool IsDisposed { diff --git a/test/TensorFlowNET.UnitTest/ConstantTest.cs b/test/TensorFlowNET.UnitTest/ConstantTest.cs index 752d6d50..e16ba6a9 100644 --- a/test/TensorFlowNET.UnitTest/ConstantTest.cs +++ b/test/TensorFlowNET.UnitTest/ConstantTest.cs @@ -1,6 +1,8 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using NumSharp; +using System; using System.Linq; +using System.Runtime.InteropServices; using Tensorflow; using static Tensorflow.Python; @@ -184,9 +186,9 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void StringEncode() { - /*string str = "Hello, TensorFlow.NET!"; + string str = "Hello, TensorFlow.NET!"; var handle = Marshal.StringToHGlobalAnsi(str); - ulong dst_len = c_api.TF_StringEncodedSize((UIntPtr)str.Length); + ulong dst_len = (ulong)c_api.TF_StringEncodedSize((UIntPtr)str.Length); Assert.AreEqual(dst_len, (ulong)23); IntPtr dst = Marshal.AllocHGlobal((int)dst_len); ulong encoded_len = c_api.TF_StringEncode(handle, (ulong)str.Length, dst, dst_len, status); @@ -194,7 +196,7 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(status.Code, TF_Code.TF_OK); string encoded_str = Marshal.PtrToStringUTF8(dst + sizeof(byte)); Assert.AreEqual(encoded_str, str); - Assert.AreEqual(str.Length, Marshal.ReadByte(dst));*/ + Assert.AreEqual(str.Length, Marshal.ReadByte(dst)); //c_api.TF_StringDecode(dst, (ulong)str.Length, IntPtr.Zero, ref dst_len, status); } diff --git a/test/TensorFlowNET.UnitTest/TensorTest.cs b/test/TensorFlowNET.UnitTest/TensorTest.cs index 6008a809..419ab4be 100644 --- a/test/TensorFlowNET.UnitTest/TensorTest.cs +++ b/test/TensorFlowNET.UnitTest/TensorTest.cs @@ -12,7 +12,7 @@ namespace TensorFlowNET.UnitTest [TestClass] 
public class TensorTest : CApiTest { - [TestMethod] + //[TestMethod] public void TensorDeallocationThreadSafety() { var tensors = new Tensor[1000]; diff --git a/test/TensorFlowNET.UnitTest/VariableTest.cs b/test/TensorFlowNET.UnitTest/VariableTest.cs index a353bcc1..0b9a44d8 100644 --- a/test/TensorFlowNET.UnitTest/VariableTest.cs +++ b/test/TensorFlowNET.UnitTest/VariableTest.cs @@ -129,6 +129,7 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void Add() { + tf.Graph().as_default(); int result = 0; Tensor x = tf.Variable(10, name: "x"); diff --git a/test/TensorFlowNET.UnitTest/c_test_util.cs b/test/TensorFlowNET.UnitTest/c_test_util.cs index c75bc616..1b6909e7 100644 --- a/test/TensorFlowNET.UnitTest/c_test_util.cs +++ b/test/TensorFlowNET.UnitTest/c_test_util.cs @@ -37,14 +37,13 @@ namespace TensorFlowNET.UnitTest public static GraphDef GetGraphDef(Graph graph) { - var s = new Status(); - var buffer = new Buffer(); - c_api.TF_GraphToGraphDef(graph, buffer, s); - s.Check(); - var def = GraphDef.Parser.ParseFrom(buffer); - buffer.Dispose(); - s.Dispose(); - return def; + using (var s = new Status()) + using (var buffer = new Buffer()) + { + c_api.TF_GraphToGraphDef(graph, buffer, s); + s.Check(); + return GraphDef.Parser.ParseFrom(buffer); + } } public static bool IsAddN(NodeDef node_def, int n) From 8178785b973da988d2f0a508d2ea5fb2c47a5282 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Fri, 2 Aug 2019 22:36:37 -0500 Subject: [PATCH 06/13] rename with to tf_with, only use to construct graph purpose. --- src/KerasNET.Core/Model.cs | 4 +- src/TensorFlowNET.Core/APIs/tf.nn.cs | 4 +- src/TensorFlowNET.Core/Buffers/Buffer.cs | 4 +- .../_InitializeClustersOpFactory.cs | 2 +- src/TensorFlowNET.Core/DisposableObject.cs | 27 ++-- .../Framework/importer.py.cs | 2 +- .../Gradients/gradients_util.cs | 4 +- src/TensorFlowNET.Core/Gradients/math_grad.cs | 12 +- .../Keras/Layers/BatchNormalization.cs | 2 +- src/TensorFlowNET.Core/Keras/Layers/Layer.cs | 2 +- src/TensorFlowNET.Core/Layers/Layer.cs | 8 +- .../Operations/ControlFlows/CondContext.cs | 2 +- .../Distributions/distribution.py.cs | 2 +- .../Operations/Distributions/normal.py.cs | 4 +- .../Operations/Losses/losses_impl.py.cs | 6 +- .../Operations/NnOps/MaxPoolFunction.cs | 2 +- .../Operations/NnOps/rnn.cs | 4 +- .../Operations/OpDefLibrary.cs | 2 +- src/TensorFlowNET.Core/Operations/RNNCell.cs | 2 +- .../Operations/_GraphTensorArray.cs | 2 +- .../Operations/array_ops.py.cs | 28 ++-- .../Operations/check_ops.cs | 6 +- .../Operations/confusion_matrix.py.cs | 2 +- .../Operations/control_flow_ops.py.cs | 20 +-- .../Operations/embedding_ops.cs | 4 +- .../Operations/gen_image_ops.py.cs | 2 +- src/TensorFlowNET.Core/Operations/math_ops.cs | 26 ++-- .../Operations/nn_impl.py.cs | 10 +- src/TensorFlowNET.Core/Operations/nn_ops.cs | 10 +- .../Operations/random_ops.py.cs | 8 +- .../Operations/weights_broadcast_ops.cs | 2 +- src/TensorFlowNET.Core/Python.cs | 6 +- .../Sessions/BaseSession.cs | 4 +- .../Sessions/SessionOptions.cs | 4 +- src/TensorFlowNET.Core/Status/Status.cs | 4 +- src/TensorFlowNET.Core/Summaries/Summary.cs | 4 +- .../TensorFlowNET.Core.csproj | 9 +- .../Tensors/Tensor.Operators.cs | 2 +- src/TensorFlowNET.Core/Tensors/Tensor.cs | 13 +- src/TensorFlowNET.Core/Train/AdamOptimizer.cs | 6 +- src/TensorFlowNET.Core/Train/Optimizer.cs | 6 +- .../Train/Saving/BaseSaverBuilder.cs | 2 +- src/TensorFlowNET.Core/Train/SlotCreator.cs | 2 +- .../Variables/RefVariable.Operators.cs | 2 +- .../Variables/RefVariable.cs | 4 +- 
.../Variables/VariableScope.cs | 2 +- src/TensorFlowNET.Core/ops.py.cs | 2 +- .../FunctionApproximation.fs | 2 +- .../BasicModels/KMeansClustering.cs | 4 +- .../BasicModels/LinearRegression.cs | 4 +- .../BasicModels/LogisticRegression.cs | 4 +- .../BasicModels/NaiveBayesClassifier.cs | 4 +- .../BasicModels/NearestNeighbor.cs | 4 +- .../BasicModels/NeuralNetXor.cs | 8 +- .../TensorFlowNET.Examples/BasicOperations.cs | 4 +- test/TensorFlowNET.Examples/HelloWorld.cs | 4 +- .../ImageProcessing/DigitRecognitionCNN.cs | 22 +-- .../ImageProcessing/DigitRecognitionNN.cs | 4 +- .../ImageProcessing/DigitRecognitionRNN.cs | 4 +- .../ImageProcessing/ImageBackgroundRemoval.cs | 4 +- .../ImageRecognitionInception.cs | 30 ++-- .../ImageProcessing/InceptionArchGoogLeNet.cs | 35 ++--- .../ImageProcessing/ObjectDetection.cs | 20 +-- .../ImageProcessing/RetrainImageClassifier.cs | 86 ++++++----- .../TextProcessing/CnnTextClassification.cs | 4 +- .../TextProcessing/NER/LstmCrfNer.cs | 4 +- .../TextProcessing/Word2Vec.cs | 4 +- .../TextProcessing/cnn_models/CharCnn.cs | 22 +-- .../TextProcessing/cnn_models/VdCnn.cs | 28 ++-- .../TextProcessing/cnn_models/WordCnn.cs | 10 +- .../Basics/NegativeTests.cs | 6 +- test/TensorFlowNET.UnitTest/ConstantTest.cs | 20 +-- test/TensorFlowNET.UnitTest/GradientTest.cs | 11 +- test/TensorFlowNET.UnitTest/GraphTest.cs | 4 +- test/TensorFlowNET.UnitTest/NameScopeTest.cs | 4 +- .../TensorFlowNET.UnitTest/PlaceholderTest.cs | 4 +- test/TensorFlowNET.UnitTest/PythonTest.cs | 5 +- test/TensorFlowNET.UnitTest/SessionTest.cs | 4 +- test/TensorFlowNET.UnitTest/TensorTest.cs | 2 +- test/TensorFlowNET.UnitTest/TrainSaverTest.cs | 14 +- test/TensorFlowNET.UnitTest/VariableTest.cs | 37 +++-- .../control_flow_ops_test/CondTestCases.cs | 8 +- .../WhileContextTestCase.cs | 4 +- .../gradients_test/GradientsTest.cs | 30 ++-- .../nn_test/ZeroFractionTest.cs | 11 +- .../ops_test/ControlDependenciesTest.cs | 71 +++++---- .../ops_test/CreateOpFromTfOperationTest.cs | 135 ++++++++---------- 87 files changed, 478 insertions(+), 494 deletions(-) diff --git a/src/KerasNET.Core/Model.cs b/src/KerasNET.Core/Model.cs index b1e6de57..d1d05159 100644 --- a/src/KerasNET.Core/Model.cs +++ b/src/KerasNET.Core/Model.cs @@ -115,7 +115,7 @@ namespace Keras var init = tf.global_variables_initializer(); float loss_value = 0; - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -133,7 +133,7 @@ namespace Keras Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 0bc9d0e5..67efe726 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -136,7 +136,7 @@ namespace Tensorflow public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null) { - return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => + return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); @@ -169,7 +169,7 @@ namespace Tensorflow /// public static Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null) { - with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope => + tf_with(ops.name_scope(name, 
"softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope => { name = scope; labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient"); diff --git a/src/TensorFlowNET.Core/Buffers/Buffer.cs b/src/TensorFlowNET.Core/Buffers/Buffer.cs index 753ad508..dbe576b8 100644 --- a/src/TensorFlowNET.Core/Buffers/Buffer.cs +++ b/src/TensorFlowNET.Core/Buffers/Buffer.cs @@ -66,7 +66,7 @@ namespace Tensorflow return buffer.Data; } - protected override void DisposeUnManagedState() - => c_api.TF_DeleteBuffer(_handle); + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteBuffer(handle); } } diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs index a6253520..8112708f 100644 --- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs +++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs @@ -68,7 +68,7 @@ namespace Tensorflow.Clustering private Tensor _initialize() { - return with(ops.control_dependencies(new Operation[] + return tf_with(ops.control_dependencies(new Operation[] { check_ops.assert_positive(_num_remaining) }), delegate diff --git a/src/TensorFlowNET.Core/DisposableObject.cs b/src/TensorFlowNET.Core/DisposableObject.cs index b59e6aa0..7e416e6d 100644 --- a/src/TensorFlowNET.Core/DisposableObject.cs +++ b/src/TensorFlowNET.Core/DisposableObject.cs @@ -34,38 +34,27 @@ namespace Tensorflow _handle = handle; } - private bool disposedValue = false; // To detect redundant calls - protected virtual void DisposeManagedState() { } - protected abstract void DisposeUnManagedState(); + protected abstract void DisposeUnManagedState(IntPtr handle); protected virtual void Dispose(bool disposing) { - if (!disposedValue) + if (disposing) { - if (disposing) + // free unmanaged resources (unmanaged objects) and override a finalizer below. + if (_handle != IntPtr.Zero) { // dispose managed state (managed objects). DisposeManagedState(); - } - // free unmanaged resources (unmanaged objects) and override a finalizer below. - /*IntPtr h = IntPtr.Zero; - lock (this) - { - h = _handle; - _handle = IntPtr.Zero; - }*/ - if (_handle != IntPtr.Zero) - DisposeUnManagedState(); - - // set large fields to null. - _handle = IntPtr.Zero; + // set large fields to null. 
+ DisposeUnManagedState(_handle); - disposedValue = true; + _handle = IntPtr.Zero; + } } } diff --git a/src/TensorFlowNET.Core/Framework/importer.py.cs b/src/TensorFlowNET.Core/Framework/importer.py.cs index 577d41aa..0c405be9 100644 --- a/src/TensorFlowNET.Core/Framework/importer.py.cs +++ b/src/TensorFlowNET.Core/Framework/importer.py.cs @@ -42,7 +42,7 @@ namespace Tensorflow string prefix = ""; var graph = ops.get_default_graph(); - with(ops.name_scope(name, "import", input_map.Values), scope => + tf_with(ops.name_scope(name, "import", input_map.Values), scope => { prefix = scope; /*if (!string.IsNullOrEmpty(prefix)) diff --git a/src/TensorFlowNET.Core/Gradients/gradients_util.cs b/src/TensorFlowNET.Core/Gradients/gradients_util.cs index 95f083da..43247fa4 100644 --- a/src/TensorFlowNET.Core/Gradients/gradients_util.cs +++ b/src/TensorFlowNET.Core/Gradients/gradients_util.cs @@ -55,7 +55,7 @@ namespace Tensorflow **/ var grads = new Dictionary>>(); - with(ops.name_scope(name, "gradients", + tf_with(ops.name_scope(name, "gradients", values: ys.Concat(xs).Concat(stop_gradients).Concat(grad_ys)), scope => { string grad_scope = scope; @@ -141,7 +141,7 @@ namespace Tensorflow } } - with(ops.name_scope(op.name + "_grad"), scope1 => + tf_with(ops.name_scope(op.name + "_grad"), scope1 => { string name1 = scope1; if (grad_fn != null) diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index a84185f3..a5ac79ba 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -90,7 +90,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var y = op.outputs[0]; // y = e^x - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { y = math_ops.conj(y); return new Tensor[] { math_ops.mul_no_nan(y, grad) }; }); @@ -107,7 +107,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { x = math_ops.conj(x); return new Tensor[] { grad * math_ops.digamma(x) }; }); @@ -118,7 +118,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { x = math_ops.conj(x); return new Tensor[] { grad * math_ops.reciprocal(x) }; }); @@ -431,7 +431,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var y = op.outputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { y = math_ops.conj(y); return new Tensor[] { gen_math_ops.sigmoid_grad(y, grad) }; @@ -453,7 +453,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { x = math_ops.conj(x); var y = constant_op.constant(2.0f, dtype: x.dtype); @@ -467,7 +467,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var y = op.outputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { y = math_ops.conj(y); return new Tensor[] { gen_math_ops.tanh_grad(y, grad) }; diff --git a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs 
b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs index d10b1874..52dc7bf4 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs @@ -207,7 +207,7 @@ namespace Tensorflow.Keras.Layers public Tensor _assign_moving_average(RefVariable variable, Tensor value, Tensor momentum) { - return Python.with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope => + return Python.tf_with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope => { // var cm = ops.colocate_with(variable); var decay = ops.convert_to_tensor(1.0f - momentum, name: "decay"); diff --git a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs index f380ce78..d96c1f14 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs @@ -125,7 +125,7 @@ namespace Tensorflow.Keras.Layers // Symbolic execution on symbolic tensors. We will attempt to build // the corresponding TF subgraph inside `backend.get_graph()` var graph = backend.get_graph().as_default(); - with(ops.name_scope(_name_scope()), delegate + tf_with(ops.name_scope(_name_scope()), delegate { // Build layer if applicable (if the `build` method has been // overridden). diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs index 57c71e92..961952a6 100644 --- a/src/TensorFlowNET.Core/Layers/Layer.cs +++ b/src/TensorFlowNET.Core/Layers/Layer.cs @@ -72,7 +72,7 @@ namespace Tensorflow.Layers } Tensor outputs = null; - with(scope_context_manager, scope2 => + tf_with(scope_context_manager, scope2 => { _current_scope = scope2; // Actually call layer @@ -136,12 +136,12 @@ namespace Tensorflow.Layers _set_scope(); var reuse = built || (_reuse != null && _reuse.Value); - return with(tf.variable_scope(_scope, + return tf_with(tf.variable_scope(_scope, reuse: reuse, auxiliary_name_scope: false), scope => { _current_scope = scope; - return with(ops.name_scope(_name_scope()), delegate + return tf_with(ops.name_scope(_name_scope()), delegate { var variable = base.add_weight(name, shape, @@ -183,7 +183,7 @@ namespace Tensorflow.Layers } else { - with(tf.variable_scope(scope, default_name: _base_name), captured_scope => + tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope => { // convert variable_scope to VariableScope _scope = captured_scope; diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs index c6b7d24d..136c9e3b 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs @@ -122,7 +122,7 @@ namespace Tensorflow.Operations _external_values[result.name] = result; } - with(ops.control_dependencies(null), ctrl => + tf_with(ops.control_dependencies(null), ctrl => { var results = control_flow_ops._SwitchRefOrTensor(result, _pred); result = results[_branch]; diff --git a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs index 19a1266b..69affeea 100644 --- a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs +++ b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs @@ -58,7 +58,7 @@ namespace Tensorflow private Tensor _call_log_prob (Tensor value, string name) { - return with(ops.name_scope(name, "moments", new { value 
}), scope => + return tf_with(ops.name_scope(name, "moments", new { value }), scope => { try { diff --git a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs index 2aa15063..f4f4b4bf 100644 --- a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs +++ b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs @@ -50,9 +50,9 @@ namespace Tensorflow parameters.Add("validate_args", validate_args); parameters.Add("allow_nan_stats", allow_nan_stats); - with(ops.name_scope(name, "", new { loc, scale }), scope => + tf_with(ops.name_scope(name, "", new { loc, scale }), scope => { - with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd => + tf_with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd => { this._loc = array_ops.identity(loc, name); this._scale = array_ops.identity(scale, name); diff --git a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs index 9e530a12..f8ed0446 100644 --- a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs @@ -24,7 +24,7 @@ namespace Tensorflow public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null, string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { - return with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate + return tf_with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate { // Save the `reduction` argument for loss normalization when distributing // to multiple replicas. Used only for estimator + v1 optimizer flow. @@ -77,7 +77,7 @@ namespace Tensorflow public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false) { - return with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => + return tf_with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => { string scope = name_scope; weights = math_ops.cast(weights, dtype: dtypes.float32); @@ -104,7 +104,7 @@ namespace Tensorflow string loss_collection= ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { - return with(ops.name_scope(scope, + return tf_with(ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", (logits, labels, weights)), name_scope => diff --git a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs index 9d1e5726..b385f9c8 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs @@ -30,7 +30,7 @@ namespace Tensorflow.Operations string data_format = "NHWC", string name = null) { - return with(ops.name_scope(name, "MaxPool", value), scope => + return tf_with(ops.name_scope(name, "MaxPool", value), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 3200e13f..5b820b3a 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -29,7 +29,7 @@ namespace Tensorflow.Operations TF_DataType dtype = TF_DataType.DtInvalid, int? 
parallel_iterations = null, bool swap_memory = false, bool time_major = false) { - with(tf.variable_scope("rnn"), scope => + tf_with(tf.variable_scope("rnn"), scope => { VariableScope varscope = scope; var flat_input = nest.flatten(inputs_tensor); @@ -139,7 +139,7 @@ namespace Tensorflow.Operations var time = array_ops.constant(0, dtype: dtypes.int32, name: "time"); string base_name = null; - with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); + tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); Func _create_ta = (name, element_shape, dtype_) => { diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 033731b0..ab34a320 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -58,7 +58,7 @@ namespace Tensorflow var input_types = new List(); object values = null; - return with(ops.name_scope(name), scope => + return tf_with(ops.name_scope(name), scope => { var inferred_from = new Dictionary(); var base_types = new List(); diff --git a/src/TensorFlowNET.Core/Operations/RNNCell.cs b/src/TensorFlowNET.Core/Operations/RNNCell.cs index 57f46e7b..1b260981 100644 --- a/src/TensorFlowNET.Core/Operations/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/RNNCell.cs @@ -82,7 +82,7 @@ namespace Tensorflow { Tensor output = null; var state_size = this.state_size; - with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate + tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate { output = _zero_state_tensors(state_size, batch_size, dtype); }); diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index b4619c05..bbeee929 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -66,7 +66,7 @@ namespace Tensorflow.Operations _element_shape = new List { }; } - with(ops.name_scope(name, "", new { handle, size, flow }), scope => + tf_with(ops.name_scope(name, "", new { handle, size, flow }), scope => { if(handle != null) { diff --git a/src/TensorFlowNET.Core/Operations/array_ops.py.cs b/src/TensorFlowNET.Core/Operations/array_ops.py.cs index c3f52cb8..2e909ab8 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.py.cs @@ -43,7 +43,7 @@ namespace Tensorflow public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "zeros", shape), scope => + return tf_with(ops.name_scope(name, "zeros", shape), scope => { name = scope; switch (dtype) @@ -67,7 +67,7 @@ namespace Tensorflow public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "zeros", shape), scope => + return tf_with(ops.name_scope(name, "zeros", shape), scope => { name = scope; switch (dtype) @@ -140,7 +140,7 @@ namespace Tensorflow { var must_pack = false; var converted_elems = new List(); - return with(ops.name_scope(name), scope => + return tf_with(ops.name_scope(name), scope => { foreach (var (i, elem) in enumerate(list_or_tuple)) { @@ -189,7 +189,7 @@ namespace Tensorflow public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) { - return 
with(ops.name_scope(name, "Rank", new List { input }), scope => + return tf_with(ops.name_scope(name, "Rank", new List { input }), scope => { name = scope; var input_tensor = ops.convert_to_tensor(input); @@ -217,7 +217,7 @@ namespace Tensorflow private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { - return with(ops.name_scope(name, "ones_like", new { tensor }), scope => + return tf_with(ops.name_scope(name, "ones_like", new { tensor }), scope => { name = scope; var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); @@ -233,7 +233,7 @@ namespace Tensorflow public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { shape }), scope => + return tf_with(ops.name_scope(name, "ones", new { shape }), scope => { name = scope; var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); @@ -244,7 +244,7 @@ namespace Tensorflow public static Tensor ones(Tensor[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { shape }), scope => + return tf_with(ops.name_scope(name, "ones", new { shape }), scope => { name = scope; var output = _constant_if_small(1, shape[0]); @@ -257,7 +257,7 @@ namespace Tensorflow public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { dims }), scope => + return tf_with(ops.name_scope(name, "ones", new { dims }), scope => { name = scope; var shape = ops.convert_to_tensor(dims, dtype: TF_DataType.TF_INT32); @@ -273,7 +273,7 @@ namespace Tensorflow int axis = -1, string name = null) { - return with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => + return tf_with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => { name = scope; var on_exists = false; @@ -341,7 +341,7 @@ namespace Tensorflow private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) { - return with(ops.name_scope(name, "Shape", new { input }), scope => + return tf_with(ops.name_scope(name, "Shape", new { input }), scope => { name = scope; @@ -362,7 +362,7 @@ namespace Tensorflow private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) { - return with(ops.name_scope(name, "Size", new { input }), scope => + return tf_with(ops.name_scope(name, "Size", new { input }), scope => { name = scope; @@ -382,7 +382,7 @@ namespace Tensorflow public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) { - return with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => + return tf_with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => { name = scope; tensor = ops.convert_to_tensor(tensor, name: "tensor"); @@ -516,7 +516,7 @@ namespace Tensorflow { if(values.Length == 1) // Degenerate case of one tensor. 
{ - return with(ops.name_scope(name), scope => { + return tf_with(ops.name_scope(name), scope => { var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32); return identity(values[0], name: scope); }); @@ -535,7 +535,7 @@ namespace Tensorflow public static Tensor transpose(T1 a, T2 perm, string name = "transpose", bool conjugate = false) { - return with(ops.name_scope(name, "transpose", new { a }), scope => + return tf_with(ops.name_scope(name, "transpose", new { a }), scope => { return gen_array_ops.transpose(a, perm, name: scope); }); diff --git a/src/TensorFlowNET.Core/Operations/check_ops.cs b/src/TensorFlowNET.Core/Operations/check_ops.cs index d5d76dd3..06b648b6 100644 --- a/src/TensorFlowNET.Core/Operations/check_ops.cs +++ b/src/TensorFlowNET.Core/Operations/check_ops.cs @@ -31,7 +31,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate + return tf_with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate { var x = ops.convert_to_tensor(t1, name: "x"); var y = ops.convert_to_tensor(t2, name: "y"); @@ -62,7 +62,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_positive", new { x, data }), delegate + return tf_with(ops.name_scope(name, "assert_positive", new { x, data }), delegate { x = ops.convert_to_tensor(x, name: "x"); if (data == null) @@ -86,7 +86,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate + return tf_with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate { x = ops.convert_to_tensor(x, name: "x"); y = ops.convert_to_tensor(y, name: "y"); diff --git a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs index be5ca217..680b500c 100644 --- a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs +++ b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs @@ -34,7 +34,7 @@ namespace Tensorflow int expected_rank_diff = 0, string name = null) { - return with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate + return tf_with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate { predictions = ops.convert_to_tensor(predictions); labels = ops.convert_to_tensor(labels); diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs index 39a4538b..2717fd3e 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs @@ -28,7 +28,7 @@ namespace Tensorflow { public static Operation Assert(Tensor condition, object[] data, int? 
summarize = null, string name = null) { - return with(ops.name_scope(name, "Assert", new { condition, data }), scope => + return tf_with(ops.name_scope(name, "Assert", new { condition, data }), scope => { name = scope; var xs = ops.convert_n_to_tensor(data); @@ -53,7 +53,7 @@ namespace Tensorflow public static Operation group(T[] inputs, string name = null) where T : ITensorOrOperation { - return with(ops.name_scope(name, "group_deps", inputs), scope => + return tf_with(ops.name_scope(name, "group_deps", inputs), scope => { name = scope; @@ -91,7 +91,7 @@ namespace Tensorflow private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null) { - return with(ops.control_dependencies(deps), ctl => + return tf_with(ops.control_dependencies(deps), ctl => { if (dev == null) { @@ -135,7 +135,7 @@ namespace Tensorflow public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null) { - return with(ops.name_scope(name, "tuple", tensors), scope => + return tf_with(ops.name_scope(name, "tuple", tensors), scope => { name = scope; var gating_ops = tensors.Where(x => x != null).Select(x => x.op).ToList(); @@ -189,13 +189,13 @@ namespace Tensorflow values.AddRange(dependencies); values.Add(output_tensor); - return with(ops.name_scope(name, "control_dependency", values), scope => + return tf_with(ops.name_scope(name, "control_dependency", values), scope => { name = scope; // TODO: missing original code //with ops.colocate_with(output_tensor): { - return with(ops.control_dependencies(dependencies), ctl => + return tf_with(ops.control_dependencies(dependencies), ctl => { output_tensor = ops.convert_to_tensor_or_composite(output_tensor); return _Identity(output_tensor, name: name); @@ -306,7 +306,7 @@ namespace Tensorflow bool strict = false, string name = null) { - return with(ops.name_scope(name, "cond", new { pred }), delegate + return tf_with(ops.name_scope(name, "cond", new { pred }), delegate { // TODO: here a chunk of original code is missing /* @@ -398,7 +398,7 @@ namespace Tensorflow bool strict = false, string name = null) { - return with(ops.name_scope(name, "cond", new { pred }), delegate + return tf_with(ops.name_scope(name, "cond", new { pred }), delegate { // Add the Switch to the graph. 
var switch_result = @switch(pred, pred); @@ -467,7 +467,7 @@ namespace Tensorflow { if (inputs.Any(x => x == null)) throw new ValueError($"At least one of the merge inputs is null: {inputs}"); - return with(ops.name_scope(name, "Merge", inputs), scope => + return tf_with(ops.name_scope(name, "Merge", inputs), scope => { name = scope; inputs = inputs.Select(inp => @@ -489,7 +489,7 @@ namespace Tensorflow TF_DataType dtype = TF_DataType.DtInvalid, string name = null) { - return with(ops.name_scope(name, "Switch", new { data, pred }), scope => + return tf_with(ops.name_scope(name, "Switch", new { data, pred }), scope => { name = scope; data = ops.internal_convert_to_tensor_or_indexed_slices(data, diff --git a/src/TensorFlowNET.Core/Operations/embedding_ops.cs b/src/TensorFlowNET.Core/Operations/embedding_ops.cs index e52107dc..23864329 100644 --- a/src/TensorFlowNET.Core/Operations/embedding_ops.cs +++ b/src/TensorFlowNET.Core/Operations/embedding_ops.cs @@ -35,7 +35,7 @@ namespace Tensorflow string name = null, string max_norm = null) { - return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => + return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => { name = scope; int np = 1; @@ -58,7 +58,7 @@ namespace Tensorflow string name = null, string max_norm = null) { - return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => + return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => { name = scope; int np = @params.Length; diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs index dee089ea..dc7188a8 100644 --- a/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs @@ -28,7 +28,7 @@ namespace Tensorflow if (dtype == image.dtype) return array_ops.identity(image, name: name); - return with(ops.name_scope(name, "convert_image", image), scope => + return tf_with(ops.name_scope(name, "convert_image", image), scope => { name = scope; diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index fc8f08ac..a5d26b23 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -29,7 +29,7 @@ namespace Tensorflow { public static Tensor abs(Tensor x, string name = null) { - return with(ops.name_scope(name, "Abs", new { x }), scope => + return tf_with(ops.name_scope(name, "Abs", new { x }), scope => { x = ops.convert_to_tensor(x, name: "x"); if (x.dtype.is_complex()) @@ -69,7 +69,7 @@ namespace Tensorflow if(base_type == x.dtype) return x; - return with(ops.name_scope(name, "Cast", new { x }), scope => + return tf_with(ops.name_scope(name, "Cast", new { x }), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -82,7 +82,7 @@ namespace Tensorflow public static Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null) { - return with(ops.name_scope(name, "Cumsum", new {x}), scope => + return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -110,7 +110,7 @@ namespace Tensorflow /// `x / y` returns the quotient of x and y. 
public static Tensor div(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "div", (x, y)), name_scope => + return tf_with(ops.name_scope(name, "div", (x, y)), name_scope => { name = name_scope; x = ops.convert_to_tensor(x, name: "x"); @@ -146,7 +146,7 @@ namespace Tensorflow /// public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => + return tf_with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => { name = name_scope; x = ops.convert_to_tensor(x, name: "x"); @@ -229,7 +229,7 @@ namespace Tensorflow public static Tensor sign(Tensor x, string name = null) { - return with(ops.name_scope(name, "Sign", new {x}), scope => + return tf_with(ops.name_scope(name, "Sign", new {x}), scope => { x = ops.convert_to_tensor(x, name: "x"); return gen_math_ops.sign(x); @@ -337,7 +337,7 @@ namespace Tensorflow /// The reduced tensor. public static Tensor reduce_logsumexp(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) { - return with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => + return tf_with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => { var raw_max = reduce_max(input_tensor, axis, true); var my_max = array_ops.stop_gradient(array_ops.where(gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max))); @@ -497,7 +497,7 @@ namespace Tensorflow if (delta == null) delta = 1; - return with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => + return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => { name = scope; var start1 = ops.convert_to_tensor(start, name: "start"); @@ -510,7 +510,7 @@ namespace Tensorflow public static Tensor floordiv(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "floordiv", new { x, y }), scope => + return tf_with(ops.name_scope(name, "floordiv", new { x, y }), scope => { return gen_math_ops.floor_div(x, y, scope); }); @@ -527,7 +527,7 @@ namespace Tensorflow { Tensor result = null; - with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => + tf_with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => { name = scope; @@ -551,7 +551,7 @@ namespace Tensorflow { Tensor result = null; - with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => + tf_with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => { name = scope; @@ -576,7 +576,7 @@ namespace Tensorflow if (dt.is_floating() || dt.is_integer()) return x; - return with(ops.name_scope(name, "Conj", new List { x }), scope => + return tf_with(ops.name_scope(name, "Conj", new List { x }), scope => { return x; @@ -591,7 +591,7 @@ namespace Tensorflow public static Tensor _truediv_python3(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "truediv", new { x, y }), scope => + return tf_with(ops.name_scope(name, "truediv", new { x, y }), scope => { name = scope; var x_dtype = x.dtype.as_base_dtype(); diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs index 7555f1cd..84fb5486 100644 --- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs @@ -34,7 +34,7 @@ namespace Tensorflow float epsilon = 1e-12f, string name = null) { - return with(ops.name_scope(name, "l2_normalize", new { x }), scope => + return tf_with(ops.name_scope(name, "l2_normalize", new { x }), scope => 
{ x = ops.convert_to_tensor(x, name: "x"); var sq = math_ops.square(x); @@ -57,7 +57,7 @@ namespace Tensorflow string name = null, bool keep_dims = false) { - return with(ops.name_scope(name, "moments", new { x, axes }), scope => + return tf_with(ops.name_scope(name, "moments", new { x, axes }), scope => { // The dynamic range of fp16 is too limited to support the collection of // sufficient statistics. As a workaround we simply perform the operations @@ -123,7 +123,7 @@ namespace Tensorflow /// number of nonzero values with type dtype private static Tensor _count_nonzero(Tensor input_tensor, TF_DataType dtype = TF_DataType.TF_INT64) { - return with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => + return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => { var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype); var nonzero_count = math_ops.reduce_sum( @@ -140,7 +140,7 @@ namespace Tensorflow /// The fraction of zeros in value, with type float32. public static Tensor zero_fraction(Tensor value, string name = null) { - return with(ops.name_scope(name, "zero_fraction", new { value }), scope => + return tf_with(ops.name_scope(name, "zero_fraction", new { value }), scope => { value = ops.convert_to_tensor(value, name: "value"); Tensor size = array_ops.size(value, out_type: dtypes.int64); @@ -153,7 +153,7 @@ namespace Tensorflow () => _count_nonzero(value, dtype: dtypes.int64) ); - with(ops.name_scope("counts_to_fraction"), count_scope => + tf_with(ops.name_scope("counts_to_fraction"), count_scope => { var num_zero = math_ops.subtract(math_ops.cast(size, TF_DataType.TF_INT64), num_nonzero); var num_zero_float32 = math_ops.cast(num_zero, dtype: dtypes.float32); diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index 0f7efce3..b3dda42f 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -50,7 +50,7 @@ namespace Tensorflow string data_format = null, string name = null) { - return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => + return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); @@ -70,7 +70,7 @@ namespace Tensorflow /// public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? seed = null, string name = null) { - return with(ops.name_scope(name, "dropout", x), scope => + return tf_with(ops.name_scope(name, "dropout", x), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -134,7 +134,7 @@ namespace Tensorflow /// public static Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) { - return with(ops.name_scope(name, "MaxPool", value), scope => + return tf_with(ops.name_scope(name, "MaxPool", value), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); @@ -171,7 +171,7 @@ namespace Tensorflow Tensor logits = null, string name = null) { // Reshape logits and labels to rank 2. 
- return with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate + return tf_with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate { labels = ops.convert_to_tensor(labels); logits = ops.convert_to_tensor(logits); @@ -206,7 +206,7 @@ namespace Tensorflow int axis = -1, string name = null) { - return with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => + return tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => { name = scope; var precise_logits = logits; diff --git a/src/TensorFlowNET.Core/Operations/random_ops.py.cs b/src/TensorFlowNET.Core/Operations/random_ops.py.cs index 37ede456..3232c917 100644 --- a/src/TensorFlowNET.Core/Operations/random_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/random_ops.py.cs @@ -37,7 +37,7 @@ namespace Tensorflow int? seed = null, string name = null) { - return with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => + return tf_with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => { var shape_tensor = _ShapeTensor(shape); var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean"); @@ -67,7 +67,7 @@ namespace Tensorflow int? seed = null, string name = null) { - return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => + return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => { name = scope; var tensorShape = _ShapeTensor(shape); @@ -85,7 +85,7 @@ namespace Tensorflow int? seed = null, string name = null) { - return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => + return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => { name = scope; var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min"); @@ -110,7 +110,7 @@ namespace Tensorflow int? 
seed = null, string name = null) { - return with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => + return tf_with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => { name = scope; var shape_tensor = _ShapeTensor(shape); diff --git a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs index 7b889bf8..d8bfcbac 100644 --- a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs +++ b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs @@ -22,7 +22,7 @@ namespace Tensorflow { public static Tensor broadcast_weights(Tensor weights, Tensor values) { - return with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => + return tf_with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => { values = ops.convert_to_tensor(values, name: "values"); weights = ops.convert_to_tensor( diff --git a/src/TensorFlowNET.Core/Python.cs b/src/TensorFlowNET.Core/Python.cs index 1d205fde..6565037b 100644 --- a/src/TensorFlowNET.Core/Python.cs +++ b/src/TensorFlowNET.Core/Python.cs @@ -75,7 +75,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static void with(IPython py, Action action) + public static void tf_with(IPython py, Action action) { try { @@ -95,7 +95,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static void with(T py, Action action) where T : IPython + public static void tf_with(T py, Action action) where T : IPython { try { @@ -115,7 +115,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static TOut with(TIn py, Func action) where TIn : IPython + public static TOut tf_with(TIn py, Func action) where TIn : IPython { try { diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index 1b807a97..fa3947f5 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -326,11 +326,11 @@ namespace Tensorflow Dispose(); } - protected override void DisposeUnManagedState() + protected override void DisposeUnManagedState(IntPtr handle) { using (var status = new Status()) { - c_api.TF_DeleteSession(_handle, status); + c_api.TF_DeleteSession(handle, status); status.Check(true); } } diff --git a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs index 21604495..8e0a0a74 100644 --- a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs +++ b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs @@ -32,8 +32,8 @@ namespace Tensorflow _handle = handle; } - protected override void DisposeUnManagedState() - => c_api.TF_DeleteSessionOptions(_handle); + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteSessionOptions(handle); public void SetConfig(ConfigProto config) { diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs index fde0bcee..7eb2d7e3 100644 --- a/src/TensorFlowNET.Core/Status/Status.cs +++ b/src/TensorFlowNET.Core/Status/Status.cs @@ -65,7 +65,7 @@ namespace Tensorflow return status._handle; } - protected override void DisposeUnManagedState() - => c_api.TF_DeleteStatus(_handle); + protected override void 
DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteStatus(handle); } } diff --git a/src/TensorFlowNET.Core/Summaries/Summary.cs b/src/TensorFlowNET.Core/Summaries/Summary.cs index 5a22385f..258edf88 100644 --- a/src/TensorFlowNET.Core/Summaries/Summary.cs +++ b/src/TensorFlowNET.Core/Summaries/Summary.cs @@ -55,7 +55,7 @@ namespace Tensorflow.Summaries /// public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null) { - return with(ops.name_scope(name, "Merge", inputs), delegate + return tf_with(ops.name_scope(name, "Merge", inputs), delegate { var val = gen_logging_ops.merge_summary(inputs: inputs, name: name); collect(val, collections?.ToList(), new List()); @@ -88,7 +88,7 @@ namespace Tensorflow.Summaries public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null) { string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}"; - return with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => + return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => { var tag = scope._name_scope; if (string.IsNullOrEmpty(family)) diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj index b7d0d36c..007eccc8 100644 --- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj +++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj @@ -5,7 +5,7 @@ TensorFlow.NET Tensorflow 1.14.0 - 0.10.7.2 + 0.10.8 Haiping Chen, Meinrad Recheis SciSharp STACK true @@ -17,7 +17,7 @@ TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C# Google's TensorFlow full binding in .NET Standard. Docs: https://tensorflownet.readthedocs.io - 0.10.7.2 + 0.10.8.0 Changes since v0.9.0: 1. Added full connected Convolution Neural Network example. @@ -34,9 +34,10 @@ Docs: https://tensorflownet.readthedocs.io 12. Add Tensor operator overloads. 13. Fix default graph and operation issue when import model. 14. Fix TF_String endcode and decode. -15. Fix Tensor memory leak. +15. Fix Tensor memory leak. +16. Rename with to tf_with that is only used to build graph purpose. 
7.2 - 0.10.7.2 + 0.10.8.0 LICENSE true true diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs index de26e28b..4bd32d74 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs @@ -123,7 +123,7 @@ namespace Tensorflow dtype = tr.dtype.as_base_dtype(); var namescope = ops.name_scope(null, name, new { x, y }); - return with(namescope, scope => + return tf_with(namescope, scope => { Tensor result = null; var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x"); diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index 5faced62..50141be6 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -300,7 +300,7 @@ namespace Tensorflow index += 1; } - return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => + return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => { string name = scope; if (begin != null) @@ -349,7 +349,7 @@ namespace Tensorflow index += 1; } - return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => + return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => { string name = scope; if (begin != null) @@ -392,8 +392,13 @@ namespace Tensorflow return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; } - protected override void DisposeUnManagedState() - => c_api.TF_DeleteTensor(_handle); + protected override void DisposeUnManagedState(IntPtr handle) + { + if(handle != IntPtr.Zero) + { + c_api.TF_DeleteTensor(handle); + } + } public bool IsDisposed { diff --git a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs index c273f4d4..673e1307 100644 --- a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs +++ b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs @@ -81,7 +81,7 @@ namespace Tensorflow.Train var m = get_slot(var, "m"); var m_scaled_g_values = grad * (1 - beta1_t); var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking); - with(ops.control_dependencies(new[] { m_t }), delegate + tf_with(ops.control_dependencies(new[] { m_t }), delegate { m_t = scatter_add(m, indices, m_scaled_g_values); }); @@ -89,7 +89,7 @@ namespace Tensorflow.Train var v = get_slot(var, "v"); var v_scaled_g_values = (grad * grad) * (1 - beta2_t); var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking); - with(ops.control_dependencies(new[] { v_t }), delegate + tf_with(ops.control_dependencies(new[] { v_t }), delegate { v_t = scatter_add(v, indices, v_scaled_g_values); }); @@ -117,7 +117,7 @@ namespace Tensorflow.Train var operations = new List(); operations.AddRange(update_ops); - with(ops.control_dependencies(update_ops), delegate + tf_with(ops.control_dependencies(update_ops), delegate { var (beta1_power, beta2_power) = _get_beta_accumulators(); ops.colocate_with(beta1_power); diff --git a/src/TensorFlowNET.Core/Train/Optimizer.cs b/src/TensorFlowNET.Core/Train/Optimizer.cs index 3e1d86c5..e945b120 100644 --- a/src/TensorFlowNET.Core/Train/Optimizer.cs +++ b/src/TensorFlowNET.Core/Train/Optimizer.cs @@ -151,7 +151,7 @@ namespace Tensorflow _create_slots(var_list); var update_ops = new List(); - return with(ops.name_scope(name, Name), scope => + return tf_with(ops.name_scope(name, Name), scope => { name = scope; _prepare(); @@ -162,7 +162,7 @@ namespace Tensorflow continue; 
var scope_name = var.op.name; - with(ops.name_scope("update_" + scope_name), scope2 => + tf_with(ops.name_scope("update_" + scope_name), scope2 => { var op = processor.update_op(this, grad); update_ops.Add(op); @@ -176,7 +176,7 @@ namespace Tensorflow } else { - with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => + tf_with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => { ops.colocate_with(global_step); // TODO: port this if branch once ResourceVariable has been ported! diff --git a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs index 9deca740..95775a72 100644 --- a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs +++ b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs @@ -102,7 +102,7 @@ namespace Tensorflow Tensor save_tensor = null; Operation restore_op = null; - return with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => + return tf_with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => { name = scope; diff --git a/src/TensorFlowNET.Core/Train/SlotCreator.cs b/src/TensorFlowNET.Core/Train/SlotCreator.cs index aaa2c171..57582355 100644 --- a/src/TensorFlowNET.Core/Train/SlotCreator.cs +++ b/src/TensorFlowNET.Core/Train/SlotCreator.cs @@ -57,7 +57,7 @@ namespace Tensorflow.Train { var validate_shape = shape.is_fully_defined(); var prefix = primary.op.name; - return with(new variable_scope(string.Empty, prefix + "/" + name), delegate + return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate { return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype); }); diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs index 8c2c1204..5adf5d9a 100644 --- a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs @@ -32,7 +32,7 @@ namespace Tensorflow private static Tensor op_helper(string default_name, RefVariable x, T y) { var tensor1 = x.value(); - return with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { + return tf_with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y"); return gen_math_ops.add(tensor1, tensor2, scope); }); diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.cs b/src/TensorFlowNET.Core/Variables/RefVariable.cs index 78a241c2..463ba2d0 100644 --- a/src/TensorFlowNET.Core/Variables/RefVariable.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.cs @@ -134,7 +134,7 @@ namespace Tensorflow ops.init_scope(); var values = init_from_fn ? 
new object[0] : new object[] { initial_value }; - with(ops.name_scope(name, "Variable", values), scope => + tf_with(ops.name_scope(name, "Variable", values), scope => { name = scope; if (init_from_fn) @@ -148,7 +148,7 @@ namespace Tensorflow List = new AttrValue.Types.ListValue() }; attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}")); - with(ops.name_scope("Initializer"), scope2 => + tf_with(ops.name_scope("Initializer"), scope2 => { _initial_value = (initial_value as Func)(); _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype); diff --git a/src/TensorFlowNET.Core/Variables/VariableScope.cs b/src/TensorFlowNET.Core/Variables/VariableScope.cs index d509d6b2..778e59b1 100644 --- a/src/TensorFlowNET.Core/Variables/VariableScope.cs +++ b/src/TensorFlowNET.Core/Variables/VariableScope.cs @@ -56,7 +56,7 @@ namespace Tensorflow VariableAggregation aggregation= VariableAggregation.None) { string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name; - return with(ops.name_scope(null), scope => + return tf_with(ops.name_scope(null), scope => { if (dtype == TF_DataType.DtInvalid) dtype = _dtype; diff --git a/src/TensorFlowNET.Core/ops.py.cs b/src/TensorFlowNET.Core/ops.py.cs index 8f7fce29..979e132e 100644 --- a/src/TensorFlowNET.Core/ops.py.cs +++ b/src/TensorFlowNET.Core/ops.py.cs @@ -295,7 +295,7 @@ namespace Tensorflow // inner_device_stack = default_graph._device_function_stack // var outer_context = default_graph.as_default; - with(ops.control_dependencies(null), delegate + tf_with(ops.control_dependencies(null), delegate { var outer_graph = get_default_graph(); // outer_device_stack = None diff --git a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs index 44e5c7a7..38c650a3 100644 --- a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs +++ b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs @@ -77,7 +77,7 @@ let run()= let init = tf.global_variables_initializer() - Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) -> + Tensorflow.Python.``tf_with``(tf.Session(), fun (sess:Session) -> sess.run(init) |> ignore // Loop over epochs for epoch in [0..training_epochs] do diff --git a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs index 3b52a75e..7bacb28d 100644 --- a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs +++ b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs @@ -52,10 +52,10 @@ namespace TensorFlowNET.Examples { PrepareData(); var graph = ImportGraph(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { Train(sess); - }); + } return accuray_test > 0.70; } diff --git a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs index f7baef1d..0098404d 100644 --- a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs +++ b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs @@ -71,7 +71,7 @@ namespace TensorFlowNET.Examples var init = tf.global_variables_initializer(); // Start training - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -114,7 +114,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Absolute mean square loss difference: {diff}"); return diff < 0.01; - }); + } } public void PrepareData() diff --git 
a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs index 1d7808b7..ca691d40 100644 --- a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs +++ b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples var sw = new Stopwatch(); - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -119,7 +119,7 @@ namespace TensorFlowNET.Examples print($"Accuracy: {acc.ToString("F4")}"); return acc > 0.9; - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs index d77c6902..358a3301 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs @@ -48,14 +48,14 @@ namespace TensorFlowNET.Examples float y_max = X.amax(0).Data(1) + 0.5f; var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30)); - with(tf.Session(), sess => + using (var sess = tf.Session()) { //var samples = np.vstack(xx.ravel(), yy.ravel()); //samples = np.transpose(samples); var array = np.Load(Path.Join("nb", "nb_example.npy")); var samples = np.array(array).astype(np.float32); var Z = sess.run(predict(samples)); - }); + } return true; } diff --git a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs index d1d867a2..8f761d00 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs @@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples float accuracy = 0f; // Initialize the variables (i.e. 
assign their default value) var init = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -77,7 +77,7 @@ namespace TensorFlowNET.Examples } print($"Accuracy: {accuracy}"); - }); + } return accuracy > 0.8; } diff --git a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs index a4a2901c..12687e3f 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs @@ -90,7 +90,7 @@ namespace TensorFlowNET.Examples var init = tf.global_variables_initializer(); float loss_value = 0; // Start tf session - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -110,7 +110,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } @@ -128,7 +128,7 @@ namespace TensorFlowNET.Examples float loss_value = 0; // Start tf session - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -143,7 +143,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } diff --git a/test/TensorFlowNET.Examples/BasicOperations.cs b/test/TensorFlowNET.Examples/BasicOperations.cs index 5fd52e2d..c7314abe 100644 --- a/test/TensorFlowNET.Examples/BasicOperations.cs +++ b/test/TensorFlowNET.Examples/BasicOperations.cs @@ -134,7 +134,7 @@ namespace TensorFlowNET.Examples 3, 3, 2)); var batchMul = tf.batch_matmul(firstTensor, secondTensor); var checkTensor = np.array(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0); - return with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(batchMul); Console.WriteLine(result.ToString()); @@ -152,7 +152,7 @@ namespace TensorFlowNET.Examples // [24, 0]]]) return np.reshape(result, 18) .array_equal(checkTensor); - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/HelloWorld.cs b/test/TensorFlowNET.Examples/HelloWorld.cs index e9c91336..52e47e3d 100644 --- a/test/TensorFlowNET.Examples/HelloWorld.cs +++ b/test/TensorFlowNET.Examples/HelloWorld.cs @@ -25,13 +25,13 @@ namespace TensorFlowNET.Examples var hello = tf.constant(str); // Start tf session - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the op var result = sess.run(hello); Console.WriteLine(result.ToString()); return result.ToString().Equals(str); - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs index ac763da2..4b882a1a 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs @@ -79,11 +79,11 @@ namespace TensorFlowNET.Examples PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + } return loss_test < 0.05 && accuracy_test > 0.98; } @@ -92,7 +92,7 @@ namespace TensorFlowNET.Examples { var graph = new Graph().as_default(); - with(tf.name_scope("Input"), delegate + tf_with(tf.name_scope("Input"), delegate { // Placeholders for inputs (x) and outputs(y) x = tf.placeholder(tf.float32, shape: (-1, img_h, 
img_w, n_channels), name: "X"); @@ -107,25 +107,25 @@ namespace TensorFlowNET.Examples var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true); var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false); - with(tf.variable_scope("Train"), delegate + tf_with(tf.variable_scope("Train"), delegate { - with(tf.variable_scope("Loss"), delegate + tf_with(tf.variable_scope("Loss"), delegate { loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss"); }); - with(tf.variable_scope("Optimizer"), delegate + tf_with(tf.variable_scope("Optimizer"), delegate { optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss); }); - with(tf.variable_scope("Accuracy"), delegate + tf_with(tf.variable_scope("Accuracy"), delegate { var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred"); accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy"); }); - with(tf.variable_scope("Prediction"), delegate + tf_with(tf.variable_scope("Prediction"), delegate { cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions"); }); @@ -204,7 +204,7 @@ namespace TensorFlowNET.Examples /// The output array private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name) { - return with(tf.variable_scope(name), delegate { + return tf_with(tf.variable_scope(name), delegate { var num_in_channel = x.shape[x.NDims - 1]; var shape = new[] { filter_size, filter_size, num_in_channel, num_filters }; @@ -244,7 +244,7 @@ namespace TensorFlowNET.Examples /// flattened array private Tensor flatten_layer(Tensor layer) { - return with(tf.variable_scope("Flatten_layer"), delegate + return tf_with(tf.variable_scope("Flatten_layer"), delegate { var layer_shape = layer.TensorShape; var num_features = layer_shape[new Slice(1, 4)].Size; @@ -293,7 +293,7 @@ namespace TensorFlowNET.Examples /// The output array private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true) { - return with(tf.variable_scope(name), delegate + return tf_with(tf.variable_scope(name), delegate { var in_dim = x.shape[1]; diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs index e604afff..02feecb9 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs @@ -59,11 +59,11 @@ namespace TensorFlowNET.Examples PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + }; return loss_test < 0.09 && accuracy_test > 0.95; } diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs index f769371a..b91a19ca 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs @@ -64,11 +64,11 @@ namespace TensorFlowNET.Examples PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + } return loss_test < 0.09 && accuracy_test > 0.95; } diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs index db148f14..8eed577b 100644 --- 
a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs @@ -32,11 +32,11 @@ namespace TensorFlowNET.Examples Tensor output = graph.OperationByName("SemanticPredictions"); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { // Runs inference on a single image. sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]")); - }); + } return false; } diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs index a0f61029..efcb0b73 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples var result_labels = new List(); var sw = new Stopwatch(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { foreach (var nd in file_ndarrays) { @@ -58,7 +58,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan); result_labels.Add(labels[idx]); } - }); + } return result_labels.Contains("military uniform"); } @@ -69,19 +69,19 @@ namespace TensorFlowNET.Examples int input_mean = 117, int input_std = 1) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); - var cast = tf.cast(decodeJpeg, tf.float32); - var dims_expander = tf.expand_dims(cast, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); + var cast = tf.cast(decodeJpeg, tf.float32); + var dims_expander = tf.expand_dims(cast, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs index 2f6b9ab1..f51833d2 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs @@ -45,9 +45,12 @@ namespace TensorFlowNET.Examples var input_operation = graph.get_operation_by_name(input_name); var output_operation = graph.get_operation_by_name(output_name); - var results = with(tf.Session(graph), - sess => sess.run(output_operation.outputs[0], - new FeedItem(input_operation.outputs[0], nd))); + NDArray results; + using (var sess = tf.Session(graph)) + { + results = sess.run(output_operation.outputs[0], + new FeedItem(input_operation.outputs[0], nd)); + } results = np.squeeze(results); @@ -69,19 +72,19 @@ namespace TensorFlowNET.Examples int 
input_mean = 0, int input_std = 255) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); - var caster = tf.cast(image_reader, tf.float32); - var dims_expander = tf.expand_dims(caster, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); + var caster = tf.cast(image_reader, tf.float32); + var dims_expander = tf.expand_dims(caster, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs index f40be91f..8f8d0dd9 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs @@ -51,7 +51,8 @@ namespace TensorFlowNET.Examples var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => Predict(sess)); + using (var sess = tf.Session(graph)) + Predict(sess); return true; } @@ -101,14 +102,15 @@ namespace TensorFlowNET.Examples private NDArray ReadTensorFromImageFile(string file_name) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); - var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); - var dims_expander = tf.expand_dims(casted, 0); - return with(tf.Session(graph), sess => sess.run(dims_expander)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); + var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); + var dims_expander = tf.expand_dims(casted, 0); + + using (var sess = tf.Session(graph)) + return sess.run(dims_expander); } private void buildOutputImage(NDArray[] resultArr) diff --git a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs index ee462c4c..becd9f7e 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs @@ -92,10 +92,10 @@ namespace TensorFlowNET.Examples var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { Train(sess); - }); + } return test_accuracy > 0.75f; } @@ -141,20 +141,18 @@ namespace TensorFlowNET.Examples Tensor evaluation_step = null; Tensor prediction = null; - with(eval_graph.as_default(), graph => - { - // Add the new layer for exporting. - var (_, _, bottleneck_input, ground_truth_input, final_tensor) = - add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, - wants_quantization, is_training: false); + var graph = eval_graph.as_default(); + // Add the new layer for exporting. + var (_, _, bottleneck_input, ground_truth_input, final_tensor) = + add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, + wants_quantization, is_training: false); - // Now we need to restore the values from the training graph to the eval - // graph. - tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); + // Now we need to restore the values from the training graph to the eval + // graph. + tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); - (evaluation_step, prediction) = add_evaluation_step(final_tensor, - ground_truth_input); - }); + (evaluation_step, prediction) = add_evaluation_step(final_tensor, + ground_truth_input); return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, evaluation_step, prediction); @@ -180,7 +178,7 @@ namespace TensorFlowNET.Examples Tensor bottleneck_tensor, bool quantize_layer, bool is_training) { var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.Dimensions[0], bottleneck_tensor.TensorShape.Dimensions[1]); - with(tf.name_scope("input"), scope => + tf_with(tf.name_scope("input"), scope => { bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, @@ -193,10 +191,10 @@ namespace TensorFlowNET.Examples // Organizing the following ops so they are easier to see in TensorBoard. 
string layer_name = "final_retrain_ops"; Tensor logits = null; - with(tf.name_scope(layer_name), scope => + tf_with(tf.name_scope(layer_name), scope => { RefVariable layer_weights = null; - with(tf.name_scope("weights"), delegate + tf_with(tf.name_scope("weights"), delegate { var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f); layer_weights = tf.Variable(initial_value, name: "final_weights"); @@ -204,13 +202,13 @@ namespace TensorFlowNET.Examples }); RefVariable layer_biases = null; - with(tf.name_scope("biases"), delegate + tf_with(tf.name_scope("biases"), delegate { layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases"); variable_summaries(layer_biases); }); - with(tf.name_scope("Wx_plus_b"), delegate + tf_with(tf.name_scope("Wx_plus_b"), delegate { logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases; tf.summary.histogram("pre_activations", logits); @@ -239,7 +237,7 @@ namespace TensorFlowNET.Examples return (null, null, bottleneck_input, ground_truth_input, final_tensor); Tensor cross_entropy_mean = null; - with(tf.name_scope("cross_entropy"), delegate + tf_with(tf.name_scope("cross_entropy"), delegate { cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( labels: ground_truth_input, logits: logits); @@ -247,7 +245,7 @@ namespace TensorFlowNET.Examples tf.summary.scalar("cross_entropy", cross_entropy_mean); - with(tf.name_scope("train"), delegate + tf_with(tf.name_scope("train"), delegate { var optimizer = tf.train.GradientDescentOptimizer(learning_rate); train_step = optimizer.minimize(cross_entropy_mean); @@ -259,12 +257,12 @@ namespace TensorFlowNET.Examples private void variable_summaries(RefVariable var) { - with(tf.name_scope("summaries"), delegate + tf_with(tf.name_scope("summaries"), delegate { var mean = tf.reduce_mean(var); tf.summary.scalar("mean", mean); Tensor stddev = null; - with(tf.name_scope("stddev"), delegate + tf_with(tf.name_scope("stddev"), delegate { stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))); }); @@ -279,7 +277,7 @@ namespace TensorFlowNET.Examples { var (height, width) = (299, 299); - return with(tf.Graph().as_default(), graph => + return tf_with(tf.Graph().as_default(), graph => { tf.train.import_meta_graph("graph/InceptionV3.meta"); Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); @@ -350,15 +348,15 @@ namespace TensorFlowNET.Examples { Tensor evaluation_step = null, correct_prediction = null, prediction = null; - with(tf.name_scope("accuracy"), scope => + tf_with(tf.name_scope("accuracy"), scope => { - with(tf.name_scope("correct_prediction"), delegate + tf_with(tf.name_scope("correct_prediction"), delegate { prediction = tf.argmax(result_tensor, 1); correct_prediction = tf.equal(prediction, ground_truth_tensor); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); }); @@ -596,7 +594,7 @@ namespace TensorFlowNET.Examples create_module_graph(); // Add the new layer that we'll be training. 
- with(graph.as_default(), delegate + tf_with(graph.as_default(), delegate { (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_final_retrain_ops( @@ -745,13 +743,13 @@ namespace TensorFlowNET.Examples Tensor input = graph.OperationByName("Placeholder"); Tensor output = graph.OperationByName("final_result"); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var result = sess.run(output, new FeedItem(input, fileBytes)); var prob = np.squeeze(result); var idx = np.argmax(prob); print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); - }); + } } private NDArray ReadTensorFromImageFile(string file_name, @@ -760,19 +758,19 @@ namespace TensorFlowNET.Examples int input_mean = 0, int input_std = 255) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); - var caster = tf.cast(image_reader, tf.float32); - var dims_expander = tf.expand_dims(caster, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); + var caster = tf.cast(image_reader, tf.float32); + var dims_expander = tf.expand_dims(caster, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void Test(Session sess_) @@ -783,7 +781,7 @@ namespace TensorFlowNET.Examples var graph = Graph.ImportFromPB(output_graph); var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); - with(tf.Session(graph), sess => + tf_with(tf.Session(graph), sess => { (test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, diff --git a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs index a2afe43d..9ec17f12 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs @@ -64,7 +64,9 @@ namespace TensorFlowNET.Examples { PrepareData(); var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => Train(sess)); + + using (var sess = tf.Session(graph)) + Train(sess); return max_accuracy > 0.9; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs index 2ec703ba..7e324c56 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs @@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples.Text.NER var init = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init); @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples.Text.NER loss_value = run_epoch(sess, train, dev, epoch); print($"train loss: {loss_value}"); } - }); + } return loss_value < 0.1; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs index ce6628e3..8ed87748 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs @@ -66,7 +66,7 @@ namespace TensorFlowNET.Examples // Initialize the variables (i.e. assign their default value) var init = tf.global_variables_initializer(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { // Run the initializer sess.run(init); @@ -112,7 +112,7 @@ namespace TensorFlowNET.Examples } } } - }); + } return average_loss < 100; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs index 390c6040..75308b8c 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs @@ -26,7 +26,7 @@ namespace TensorFlowNET.Examples.Text Tensor conv3 = null, conv4 = null, conv5 = null, conv6 = null; Tensor h_pool = null; - with(tf.name_scope("conv-maxpool-1"), delegate + tf_with(tf.name_scope("conv-maxpool-1"), delegate { var conv1 = tf.layers.conv2d(x_expanded, filters: num_filters, @@ -40,7 +40,7 @@ namespace TensorFlowNET.Examples.Text pool1 = tf.transpose(pool1, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-maxpool-2"), delegate + tf_with(tf.name_scope("conv-maxpool-2"), delegate { var conv2 = tf.layers.conv2d(pool1, filters: num_filters, @@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples.Text pool2 = tf.transpose(pool2, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-3"), delegate + tf_with(tf.name_scope("conv-3"), delegate { conv3 = tf.layers.conv2d(pool2, filters: num_filters, @@ -64,7 +64,7 @@ namespace TensorFlowNET.Examples.Text conv3 = tf.transpose(conv3, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-4"), delegate + tf_with(tf.name_scope("conv-4"), delegate { conv4 = tf.layers.conv2d(conv3, filters: num_filters, @@ -74,7 +74,7 @@ namespace TensorFlowNET.Examples.Text conv4 = tf.transpose(conv4, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-5"), delegate + tf_with(tf.name_scope("conv-5"), delegate { conv5 = tf.layers.conv2d(conv4, filters: num_filters, @@ -84,7 +84,7 @@ namespace TensorFlowNET.Examples.Text conv5 = tf.transpose(conv5, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-maxpool-6"), delegate + tf_with(tf.name_scope("conv-maxpool-6"), delegate { conv6 = tf.layers.conv2d(conv5, filters: num_filters, @@ -105,7 +105,7 @@ namespace TensorFlowNET.Examples.Text Tensor logits = null; Tensor predictions = null; - with(tf.name_scope("fc-1"), delegate 
+ tf_with(tf.name_scope("fc-1"), delegate { fc1_out = tf.layers.dense(h_pool, 1024, @@ -113,7 +113,7 @@ namespace TensorFlowNET.Examples.Text kernel_initializer: kernel_initializer); }); - with(tf.name_scope("fc-2"), delegate + tf_with(tf.name_scope("fc-2"), delegate { fc2_out = tf.layers.dense(fc1_out, 1024, @@ -121,7 +121,7 @@ namespace TensorFlowNET.Examples.Text kernel_initializer: kernel_initializer); }); - with(tf.name_scope("fc-3"), delegate + tf_with(tf.name_scope("fc-3"), delegate { logits = tf.layers.dense(fc2_out, num_class, @@ -129,7 +129,7 @@ namespace TensorFlowNET.Examples.Text predictions = tf.argmax(logits, -1, output_type: tf.int32); }); - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var y_one_hot = tf.one_hot(y, num_class); var loss = tf.reduce_mean( @@ -137,7 +137,7 @@ namespace TensorFlowNET.Examples.Text var optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step: global_step); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { var correct_predictions = tf.equal(predictions, y); var accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name: "accuracy"); diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs index c71376f8..8087a2b2 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs @@ -41,7 +41,7 @@ namespace TensorFlowNET.Examples.Text global_step = tf.Variable(0, trainable: false); // Embedding Layer - with(tf.name_scope("embedding"), delegate + tf_with(tf.name_scope("embedding"), delegate { var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f); embeddings = tf.get_variable("embeddings", initializer: init_embeddings); @@ -59,7 +59,7 @@ namespace TensorFlowNET.Examples.Text Tensor fc2_out = null; // First Convolution Layer - with(tf.variable_scope("conv-0"), delegate + tf_with(tf.variable_scope("conv-0"), delegate { conv0 = tf.layers.conv2d(x_expanded, filters: num_filters[0], @@ -70,25 +70,25 @@ namespace TensorFlowNET.Examples.Text conv0 = tf.transpose(conv0, new int[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-block-1"), delegate { + tf_with(tf.name_scope("conv-block-1"), delegate { conv1 = conv_block(conv0, 1); }); - with(tf.name_scope("conv-block-2"), delegate { + tf_with(tf.name_scope("conv-block-2"), delegate { conv2 = conv_block(conv1, 2); }); - with(tf.name_scope("conv-block-3"), delegate { + tf_with(tf.name_scope("conv-block-3"), delegate { conv3 = conv_block(conv2, 3); }); - with(tf.name_scope("conv-block-4"), delegate + tf_with(tf.name_scope("conv-block-4"), delegate { conv4 = conv_block(conv3, 4, max_pool: false); }); // ============= k-max Pooling ============= - with(tf.name_scope("k-max-pooling"), delegate + tf_with(tf.name_scope("k-max-pooling"), delegate { var h = tf.transpose(tf.squeeze(conv4, new int[] { -1 }), new int[] { 0, 2, 1 }); var top_k = tf.nn.top_k(h, k: 8, sorted: false)[0]; @@ -96,30 +96,30 @@ namespace TensorFlowNET.Examples.Text }); // ============= Fully Connected Layers ============= - with(tf.name_scope("fc-1"), scope => + tf_with(tf.name_scope("fc-1"), scope => { fc1_out = tf.layers.dense(h_flat, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer); }); - with(tf.name_scope("fc-2"), scope => + tf_with(tf.name_scope("fc-2"), scope => { fc2_out = tf.layers.dense(fc1_out, 2048, 
activation: tf.nn.relu(), kernel_initializer: fc_initializer); }); - with(tf.name_scope("fc-3"), scope => + tf_with(tf.name_scope("fc-3"), scope => { logits = tf.layers.dense(fc2_out, num_class, activation: null, kernel_initializer: fc_initializer); predictions = tf.argmax(logits, -1, output_type: tf.int32); }); // ============= Loss and Accuracy ============= - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var y_one_hot = tf.one_hot(y, num_class); loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot)); var update_ops = tf.get_collection(ops.GraphKeys.UPDATE_OPS) as List; - with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate + tf_with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate { var adam = tf.train.AdamOptimizer(learning_rate); adam.minimize(loss, global_step: global_step); @@ -129,13 +129,13 @@ namespace TensorFlowNET.Examples.Text private Tensor conv_block(Tensor input, int i, bool max_pool = true) { - return with(tf.variable_scope($"conv-block-{i}"), delegate + return tf_with(tf.variable_scope($"conv-block-{i}"), delegate { Tensor conv = null; // Two "conv-batch_norm-relu" layers. foreach (var j in Enumerable.Range(0, 2)) { - with(tf.variable_scope($"conv-{j}"), delegate + tf_with(tf.variable_scope($"conv-{j}"), delegate { // convolution conv = tf.layers.conv2d( diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs index b51f6719..3448ac83 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs @@ -36,7 +36,7 @@ namespace TensorFlowNET.Examples.Text var keep_prob = tf.where(is_training, 0.5f, 1.0f); Tensor x_emb = null; - with(tf.name_scope("embedding"), scope => + tf_with(tf.name_scope("embedding"), scope => { var init_embeddings = tf.random_uniform(new int[] { vocabulary_size, embedding_size }); var embeddings = tf.get_variable("embeddings", initializer: init_embeddings); @@ -68,20 +68,20 @@ namespace TensorFlowNET.Examples.Text var h_pool = tf.concat(pooled_outputs, 3); var h_pool_flat = tf.reshape(h_pool, new TensorShape(-1, num_filters * filter_sizes.Rank)); Tensor h_drop = null; - with(tf.name_scope("dropout"), delegate + tf_with(tf.name_scope("dropout"), delegate { h_drop = tf.nn.dropout(h_pool_flat, keep_prob); }); Tensor logits = null; Tensor predictions = null; - with(tf.name_scope("output"), delegate + tf_with(tf.name_scope("output"), delegate { logits = tf.layers.dense(h_drop, num_class); predictions = tf.argmax(logits, -1, output_type: tf.int32); }); - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var sscel = tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: y); var loss = tf.reduce_mean(sscel); @@ -89,7 +89,7 @@ namespace TensorFlowNET.Examples.Text var optimizer = adam.minimize(loss, global_step: global_step); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { var correct_predictions = tf.equal(predictions, y); var accuracy = tf.reduce_mean(tf.cast(correct_predictions, TF_DataType.TF_FLOAT), name: "accuracy"); diff --git a/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs b/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs index 811c58e5..e953eb8e 100644 --- a/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs +++ 
b/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs @@ -12,13 +12,13 @@ namespace TensorFlowNET.UnitTest.Basics { var x = tf.constant(new[,] { { 1, 2 } }); var neg_x = tf.negative(x); - with(tf.Session(), session => + using (var sess = tf.Session()) { - var result = session.run(neg_x); + var result = sess.run(neg_x); Assert.AreEqual(result[0][0], -1); Assert.AreEqual(result[0][1], -2); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/ConstantTest.cs b/test/TensorFlowNET.UnitTest/ConstantTest.cs index e16ba6a9..221d6196 100644 --- a/test/TensorFlowNET.UnitTest/ConstantTest.cs +++ b/test/TensorFlowNET.UnitTest/ConstantTest.cs @@ -94,18 +94,18 @@ namespace TensorFlowNET.UnitTest { // small size var tensor = tf.zeros(new Shape(3, 2), TF_DataType.TF_INT32, "small"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 0, 0, 0, 0, 0, 0 }, result.Data())); - }); + } // big size tensor = tf.zeros(new Shape(200, 100), TF_DataType.TF_INT32, "big"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); @@ -116,21 +116,21 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(0, data[0]); Assert.AreEqual(0, data[500]); Assert.AreEqual(0, data[result.size - 1]); - }); + } } [TestMethod] public void OnesConst() { var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(ones); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(new[] { 1, 1, 1, 1, 1, 1 }.SequenceEqual(result.Data())); - }); + } } [TestMethod] @@ -138,14 +138,14 @@ namespace TensorFlowNET.UnitTest { var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); var halfes = ones * 0.5; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(halfes); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(new[] { .5, .5, .5, .5, .5, .5 }.SequenceEqual(result.Data())); - }); + } } [TestMethod] @@ -158,7 +158,7 @@ namespace TensorFlowNET.UnitTest }); var tensor = tf.constant(nd); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); var data = result.Data(); @@ -166,7 +166,7 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(result.shape[0], 2); Assert.AreEqual(result.shape[1], 3); Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 1, 1, 2, 1, 3 }, data)); - }); + } } [TestMethod] diff --git a/test/TensorFlowNET.UnitTest/GradientTest.cs b/test/TensorFlowNET.UnitTest/GradientTest.cs index 8497be1f..372715dc 100644 --- a/test/TensorFlowNET.UnitTest/GradientTest.cs +++ b/test/TensorFlowNET.UnitTest/GradientTest.cs @@ -33,7 +33,8 @@ namespace TensorFlowNET.UnitTest public void Gradient2x() { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => { + using (var sess = tf.Session(graph)) + { var x = tf.constant(7.0f); var y = x * x * tf.constant(0.1f); @@ -42,14 +43,14 @@ namespace TensorFlowNET.UnitTest float r = sess.run(grad[0]); Assert.AreEqual(r, 1.4f); - }); + } } [TestMethod] public void Gradient3x() { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => { + tf_with(tf.Session(graph), sess => { var x = tf.constant(7.0f); var y = x * x * x * tf.constant(0.1f); @@ -91,14 +92,14 @@ namespace TensorFlowNET.UnitTest var g = tf.gradients(y, new Tensor[] { 
slice, slice }); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var r = sess.run(slice); Assert.IsTrue(Enumerable.SequenceEqual(r.shape, new[] { 2, 1, 2 })); Assert.IsTrue(Enumerable.SequenceEqual(r[0].GetData(), new[] { 11, 13 })); Assert.IsTrue(Enumerable.SequenceEqual(r[1].GetData(), new[] { 51, 53 })); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/GraphTest.cs b/test/TensorFlowNET.UnitTest/GraphTest.cs index e50e19f8..31abd561 100644 --- a/test/TensorFlowNET.UnitTest/GraphTest.cs +++ b/test/TensorFlowNET.UnitTest/GraphTest.cs @@ -419,7 +419,7 @@ namespace TensorFlowNET.UnitTest public void ImportGraphMeta() { var dir = "my-save-dir/"; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var new_saver = tf.train.import_meta_graph(dir + "my-model-10000.meta"); new_saver.restore(sess, dir + "my-model-10000"); @@ -428,7 +428,7 @@ namespace TensorFlowNET.UnitTest var logits = (tf.get_collection("logits") as List)[0] as Tensor; var loss = tf.losses.sparse_softmax_cross_entropy(labels: labels, logits: logits); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/NameScopeTest.cs b/test/TensorFlowNET.UnitTest/NameScopeTest.cs index 8d364c8b..39401fe0 100644 --- a/test/TensorFlowNET.UnitTest/NameScopeTest.cs +++ b/test/TensorFlowNET.UnitTest/NameScopeTest.cs @@ -13,7 +13,7 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void NestedNameScope() { - with(new ops.NameScope("scope1"), scope1 => + tf_with(new ops.NameScope("scope1"), scope1 => { name = scope1; Assert.AreEqual("scope1", g._name_stack); @@ -22,7 +22,7 @@ namespace TensorFlowNET.UnitTest var const1 = tf.constant(1.0); Assert.AreEqual("scope1/Const:0", const1.name); - with(new ops.NameScope("scope2"), scope2 => + tf_with(new ops.NameScope("scope2"), scope2 => { name = scope2; Assert.AreEqual("scope1/scope2", g._name_stack); diff --git a/test/TensorFlowNET.UnitTest/PlaceholderTest.cs b/test/TensorFlowNET.UnitTest/PlaceholderTest.cs index c5a575c1..a8cec568 100644 --- a/test/TensorFlowNET.UnitTest/PlaceholderTest.cs +++ b/test/TensorFlowNET.UnitTest/PlaceholderTest.cs @@ -13,12 +13,12 @@ namespace TensorFlowNET.UnitTest var x = tf.placeholder(tf.int32); var y = x * 3; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(y, new FeedItem(x, 2)); Assert.AreEqual((int)result, 6); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index 6ea4fd19..d51ccc62 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -164,7 +164,7 @@ namespace TensorFlowNET.UnitTest // return self._eval_helper(tensors) // else: { - with(tf.Session(), s => + using (var sess = tf.Session()) { var ndarray=tensor.eval(); if (typeof(T) == typeof(double)) @@ -181,7 +181,8 @@ namespace TensorFlowNET.UnitTest { result = ndarray; } - }); + } + return (T)result; } } diff --git a/test/TensorFlowNET.UnitTest/SessionTest.cs b/test/TensorFlowNET.UnitTest/SessionTest.cs index 72e5e24a..51620b19 100644 --- a/test/TensorFlowNET.UnitTest/SessionTest.cs +++ b/test/TensorFlowNET.UnitTest/SessionTest.cs @@ -82,11 +82,11 @@ namespace TensorFlowNET.UnitTest var a = constant_op.constant(np.array(3.0).reshape(1, 1)); var b = constant_op.constant(np.array(2.0).reshape(1, 1)); var c = math_ops.matmul(a, b, name: "matmul"); - with(tf.Session(), delegate + using (var sess = tf.Session()) { var result = c.eval(); Assert.AreEqual(6, result.Data()[0]); - }); + } } } } diff --git 
a/test/TensorFlowNET.UnitTest/TensorTest.cs b/test/TensorFlowNET.UnitTest/TensorTest.cs index 419ab4be..6666443c 100644 --- a/test/TensorFlowNET.UnitTest/TensorTest.cs +++ b/test/TensorFlowNET.UnitTest/TensorTest.cs @@ -12,7 +12,7 @@ namespace TensorFlowNET.UnitTest [TestClass] public class TensorTest : CApiTest { - //[TestMethod] + [Ignore("Not for multi-thread")] public void TensorDeallocationThreadSafety() { var tensors = new Tensor[1000]; diff --git a/test/TensorFlowNET.UnitTest/TrainSaverTest.cs b/test/TensorFlowNET.UnitTest/TrainSaverTest.cs index c437c862..625ca874 100644 --- a/test/TensorFlowNET.UnitTest/TrainSaverTest.cs +++ b/test/TensorFlowNET.UnitTest/TrainSaverTest.cs @@ -17,10 +17,10 @@ namespace TensorFlowNET.UnitTest public void ImportGraph() { - with(tf.Session(), sess => + using (var sess = tf.Session()) { var new_saver = tf.train.import_meta_graph("C:/tmp/my-model.meta"); - }); + } //tf.train.export_meta_graph(filename: "linear_regression.meta.bin"); // import meta @@ -42,7 +42,7 @@ namespace TensorFlowNET.UnitTest public void ImportSavedModel() { - with(Session.LoadFromSavedModel("mobilenet"), sess => + tf_with(Session.LoadFromSavedModel("mobilenet"), sess => { }); @@ -63,14 +63,14 @@ namespace TensorFlowNET.UnitTest // Add ops to save and restore all the variables. var saver = tf.train.Saver(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // Save the variables to disk. var save_path = saver.save(sess, "/tmp/model1.ckpt"); Console.WriteLine($"Model saved in path: {save_path}"); - }); + } } public void Save2() @@ -87,7 +87,7 @@ namespace TensorFlowNET.UnitTest // Add ops to save and restore all the variables. var saver = tf.train.Saver(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // o some work with the model. @@ -97,7 +97,7 @@ namespace TensorFlowNET.UnitTest // Save the variables to disk. var save_path = saver.save(sess, "/tmp/model2.ckpt"); Console.WriteLine($"Model saved in path: {save_path}"); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/VariableTest.cs b/test/TensorFlowNET.UnitTest/VariableTest.cs index 0b9a44d8..e170bcab 100644 --- a/test/TensorFlowNET.UnitTest/VariableTest.cs +++ b/test/TensorFlowNET.UnitTest/VariableTest.cs @@ -35,9 +35,9 @@ namespace TensorFlowNET.UnitTest public void VarCreation() { tf.Graph().as_default(); - with(tf.variable_scope("foo"), delegate + tf_with(tf.variable_scope("foo"), delegate { - with(tf.variable_scope("bar"), delegate + tf_with(tf.variable_scope("bar"), delegate { var v = tf.get_variable("v", new TensorShape(1)); Assert.AreEqual(v.name, "foo/bar/v:0"); @@ -53,14 +53,14 @@ namespace TensorFlowNET.UnitTest { tf.Graph().as_default(); variable_scope vs = null; - with(tf.variable_scope("foo"), v => vs = v); + tf_with(tf.variable_scope("foo"), v => vs = v); // Re-enter the variable scope. - with(tf.variable_scope(vs, auxiliary_name_scope: false), v => + tf_with(tf.variable_scope(vs, auxiliary_name_scope: false), v => { var vs1 = (VariableScope)v; // Restore the original name_scope.
- with(tf.name_scope(vs1.original_name_scope), delegate + tf_with(tf.name_scope(vs1.original_name_scope), delegate { var v1 = tf.get_variable("v", new TensorShape(1)); Assert.AreEqual(v1.name, "foo/v:0"); @@ -89,21 +89,20 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void Assign1() { - with(tf.Graph().as_default(), graph => - { - var variable = tf.Variable(31, name: "tree"); - var init = tf.global_variables_initializer(); + var graph = tf.Graph().as_default(); - var sess = tf.Session(graph); - sess.run(init); + var variable = tf.Variable(31, name: "tree"); + var init = tf.global_variables_initializer(); - var result = sess.run(variable); - Assert.IsTrue((int)result == 31); + var sess = tf.Session(graph); + sess.run(init); - var assign = variable.assign(12); - result = sess.run(assign); - Assert.IsTrue((int)result == 12); - }); + var result = sess.run(variable); + Assert.IsTrue((int)result == 31); + + var assign = variable.assign(12); + result = sess.run(assign); + Assert.IsTrue((int)result == 12); } [TestMethod] @@ -115,12 +114,12 @@ namespace TensorFlowNET.UnitTest // Add an op to initialize the variables. var init_op = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // o some work with the model. inc_v1.op.run(); - }); + } } /// diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs index a116c91a..94686049 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs @@ -16,7 +16,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var x = tf.constant(2, name: "x"); var y = tf.constant(5, name: "y"); @@ -27,7 +27,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test int result = z.eval(sess); assertEquals(result, 22); - }); + } } [Ignore("need tesnroflow expose AddControlInput API")] @@ -36,7 +36,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var x = tf.constant(2, name: "x"); var y = tf.constant(1, name: "y"); @@ -47,7 +47,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test int result = z.eval(sess); assertEquals(result, 11); - }); + } } [Ignore("need tesnroflow expose AddControlInput API")] diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs index 47cb397a..682b826f 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs @@ -11,7 +11,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test private void _testWhileContextHelper(int? 
maximum_iterations = null) { // TODO: implement missing code dependencies - with(this.cached_session(), sess => + using (var sess = this.cached_session()) { var i = constant_op.constant(0, name: "i"); var c = new Func(x => gen_math_ops.less(x, 10, name: "c")); @@ -26,7 +26,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test WhileContext.from_proto( control_flow_context.to_proto()).to_proto(), "");*/ } - }); + } } [Ignore("TODO")] diff --git a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs index d1249cf0..68ee14e4 100644 --- a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs +++ b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs @@ -15,20 +15,18 @@ namespace TensorFlowNET.UnitTest.gradients_test [TestMethod] public void testGradients() { - with(tf.Graph().as_default(), g => - { - var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); - var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); - var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); - var xw = math_ops.matmul(inp, w, name: "xw"); - var h = nn_ops.bias_add(xw, b, name: "h"); - var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; - self.assertEquals("MatMul", w_grad.op.type); - // TODO: Operation._original_op - //self.assertEquals(w_grad.op._original_op, xw.op); - self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); - self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); - }); + var g = tf.Graph().as_default(); + var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); + var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); + var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); + var xw = math_ops.matmul(inp, w, name: "xw"); + var h = nn_ops.bias_add(xw, b, name: "h"); + var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; + self.assertEquals("MatMul", w_grad.op.type); + // TODO: Operation._original_op + //self.assertEquals(w_grad.op._original_op, xw.op); + self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); + self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); } [TestMethod] @@ -104,14 +102,14 @@ namespace TensorFlowNET.UnitTest.gradients_test tf.constant(new[] { 1 }, tf.int32, new[] { 1 }) ); var g = tf.gradients(b, a); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(new object[] { g, b }); var actualDeriv = np.squeeze(result[0]); var actual = np.squeeze(result[1]); self.assertEquals(new float[] { 1, 0 }, new float[] { actualDeriv[0], actualDeriv[1] }); self.assertEquals(0.9640276f, (float)actual); - }); + } } [TestMethod] diff --git a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs index 33a16ba8..47baeeb5 100644 --- a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs +++ b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs @@ -73,12 +73,13 @@ namespace TensorFlowNET.UnitTest.nn_test { var value = array_ops.placeholder(dtype: dtypes.float32); var sparsity = nn_impl.zero_fraction(value); - with(self.cached_session(), sess => { + using (var sess = self.cached_session()) + { // TODO: make this compile - //self.assertAllClose( - // 0.25, - // sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); - }); + //self.assertAllClose( + // 0.25, + // sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); + } } diff --git a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs 
b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs index 21ee3f6d..8c8a89dd 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs @@ -16,19 +16,18 @@ namespace TensorFlowNET.UnitTest.ops_test [TestMethod] public void TestBasic() { - var graph = tf.Graph().as_default(); + var g = tf.Graph().as_default(); Tensor a = null, b = null, c = null, d = null, e = null; - with(graph, g => + + a = constant_op.constant(1.0); + b = constant_op.constant(1.0); + tf_with(g.control_dependencies(new[] { a }), x => { - a = constant_op.constant(1.0); - b = constant_op.constant(1.0); - with(g.control_dependencies(new[] { a }), x => - { - c = constant_op.constant(1.0); - d = array_ops.identity(b); - e = array_ops.identity(c); - }); + c = constant_op.constant(1.0); + d = array_ops.identity(b); + e = array_ops.identity(c); }); + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); // e should be dominated by c. @@ -56,7 +55,7 @@ namespace TensorFlowNET.UnitTest.ops_test // TODO: make this compile (see original Python code below) a = constant_op.constant(1.0); b = future; // <--- {henon} obviously, this doesn't compile, looks like control_dependencies needs to be able to take callables as well. - with(ops.control_dependencies(new object[] { a, b }), ctrl => + tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => { return c = constant_op.constant(3.0); }); @@ -64,19 +63,15 @@ namespace TensorFlowNET.UnitTest.ops_test } else { - var graph = tf.Graph().as_default(); - with(graph, g => + var g = tf.Graph().as_default(); + a = constant_op.constant(1.0); + var b1 = future(); + tf_with(g.control_dependencies(new[] { a, b }), ctrl => { - a = constant_op.constant(1.0); - var b1 = future(); - with(g.control_dependencies(new[] { a, b }), ctrl => - { - c = constant_op.constant(3.0); - }); - Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); - Assert.AreEqual(1, calls); + c = constant_op.constant(3.0); }); - + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); + Assert.AreEqual(1, calls); } } /* @@ -139,17 +134,17 @@ namespace TensorFlowNET.UnitTest.ops_test var a_3 = constant_op.constant(4.0); var a_4 = constant_op.constant(5.0); Tensor b_1 = null, b_2 = null; - with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => + tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => { b_1 = constant_op.constant(6.0); }); - with(g.control_dependencies(new[] { a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { - with(g.control_dependencies(new[] { a_3 }), ctrl3 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => { - with(g.control_dependencies(new[] { a_4 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => { b_2 = constant_op.constant(7.0); }); @@ -175,15 +170,15 @@ namespace TensorFlowNET.UnitTest.ops_test var a_3 = constant_op.constant(4.0); var a_4 = constant_op.constant(5.0); Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; - with(g.control_dependencies(new[] { a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + 
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { - with(g.control_dependencies(null), ctrl3 => + tf_with(g.control_dependencies(null), ctrl3 => { - with(g.control_dependencies(new[] { a_3 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => { - with(g.control_dependencies(new[] { a_4 }), ctrl5 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => { // deps [a_3, a_4] b_3_4 = constant_op.constant(7.0); @@ -199,7 +194,7 @@ namespace TensorFlowNET.UnitTest.ops_test }); // deps back to [a_1] b_1 = constant_op.constant(11.0); - with(g.control_dependencies(null), ctrl6 => + tf_with(g.control_dependencies(null), ctrl6 => { // deps are None again b_none2 = constant_op.constant(12.0); @@ -233,25 +228,25 @@ namespace TensorFlowNET.UnitTest.ops_test Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; - with(g.control_dependencies(new[] { a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { b_1 = tf.multiply(a_3, a_4); c_1 = tf.multiply(a_1, b_1.output); d_1 = tf.multiply(b_1.output, c_1.output); e_1 = constant_op.constant(5.0); - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { b_2 = tf.multiply(a_3, a_4); c_2 = tf.multiply(a_1, b_1.output); d_2 = tf.multiply(b_2.output, c_2.output); e_2 = tf.multiply(e_1.output, e_1.output); - with(g.control_dependencies(new[] { a_3 }), ctrl3 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => { b_3 = tf.multiply(a_3, a_4); c_3 = tf.multiply(a_1, b_1.output); d_3 = tf.multiply(b_3.output, c_3.output); e_3 = tf.multiply(e_2.output, e_2.output); - with(g.control_dependencies(new[] { a_4 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => { b_4 = tf.multiply(a_3, a_4); c_4 = tf.multiply(a_1, b_1.output); @@ -310,7 +305,7 @@ namespace TensorFlowNET.UnitTest.ops_test var g = tf.Graph().as_default(); Operation b = null; var a = constant_op.constant(100.0); - with(g.control_dependencies(new[] { a }), ctrl1 => + tf_with(g.control_dependencies(new[] { a }), ctrl1 => { b = array_ops.identity(a); }); diff --git a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs index a961d7ec..dcaeaf11 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs @@ -24,81 +24,73 @@ namespace TensorFlowNET.UnitTest.ops_test [TestMethod] public void TestShape() { - var graph = tf.Graph().as_default(); - with(graph, g => - { - var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); - var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); - var op = g._create_op_from_tf_operation(c_op); - - Assert.AreEqual("myop", op.name); - Assert.AreEqual("Identity", op.type); - Assert.AreEqual(1, len(op.outputs)); - assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); - }); + var g = tf.Graph().as_default(); + + var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); + var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); + var op = g._create_op_from_tf_operation(c_op); + + Assert.AreEqual("myop", op.name); + Assert.AreEqual("Identity", op.type); + Assert.AreEqual(1, len(op.outputs)); + assertItemsEqual(new[] { 2, 3 }, 
op.outputs[0].shape); } [TestMethod] public void TestUniqueName() { var graph = tf.Graph().as_default(); - with(graph, g => - { - //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); - //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); - //var op = g._create_op_from_tf_operation(c_op); - //var op2 = g._create_op_from_tf_operation(c_op2); - var op = constant_op.constant(0, name: "myop").op; - var op2 = constant_op.constant(0, name: "myop_1").op; - - // Create ops with same names as op1 and op2. We expect the new names to be - // uniquified. - var op3 = constant_op.constant(0, name: "myop").op; - var op4 = constant_op.constant(0, name: "myop_1").op; - - self.assertEqual(op.name, "myop"); - self.assertEqual(op2.name, "myop_1"); - self.assertEqual(op3.name, "myop_2"); - self.assertEqual(op4.name, "myop_1_1"); - }); + //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); + //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); + //var op = g._create_op_from_tf_operation(c_op); + //var op2 = g._create_op_from_tf_operation(c_op2); + var op = constant_op.constant(0, name: "myop").op; + var op2 = constant_op.constant(0, name: "myop_1").op; + + // Create ops with same names as op1 and op2. We expect the new names to be + // uniquified. + var op3 = constant_op.constant(0, name: "myop").op; + var op4 = constant_op.constant(0, name: "myop_1").op; + + self.assertEqual(op.name, "myop"); + self.assertEqual(op2.name, "myop_1"); + self.assertEqual(op3.name, "myop_2"); + self.assertEqual(op4.name, "myop_1_1"); } [Ignore("need tesnroflow expose UpdateEdge API")] [TestMethod] public void TestCond() { - var graph = tf.Graph().as_default(); - with(graph, g => + var g = tf.Graph().as_default(); + var x = constant_op.constant(10); + + var true_fn = new Func(() => { - var x = constant_op.constant(10); - - var true_fn = new Func(() => - { - var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); - var new_ops = g._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return x; - }); - - control_flow_ops.cond(x < 10, true_fn, () => x); - - var op = g.get_operation_by_name("cond/myop"); - - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); - - self.assertIsNotNone(op); - self.assertEqual(op.name, "cond/myop"); - self.assertEqual(op.type, "Identity"); - //self.assertEqual(op.outputs, new object[0]); - var op_input = op.inputs[0].op; - self.assertEqual(op_input.type, "Switch"); - self.assertEqual(op_input.inputs[0].name, x.name); - self.assertEqual(op.graph, g); - self.assertIsNotNone(op._get_control_flow_context()); - var cond_text = op._get_control_flow_context() as ControlFlowContext; - self.assertEqual(cond_text.name, "cond/cond_text"); + var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); + var new_ops = g._add_new_tf_operations(); + self.assertEqual(len(new_ops), 1); + return x; }); + + control_flow_ops.cond(x < 10, true_fn, () => x); + + var op = g.get_operation_by_name("cond/myop"); + + //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); + 
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); + + self.assertIsNotNone(op); + self.assertEqual(op.name, "cond/myop"); + self.assertEqual(op.type, "Identity"); + //self.assertEqual(op.outputs, new object[0]); + var op_input = op.inputs[0].op; + self.assertEqual(op_input.type, "Switch"); + self.assertEqual(op_input.inputs[0].name, x.name); + self.assertEqual(op.graph, g); + self.assertIsNotNone(op._get_control_flow_context()); + var cond_text = op._get_control_flow_context() as ControlFlowContext; + self.assertEqual(cond_text.name, "cond/cond_text"); } [Ignore("Todo: Port")] @@ -107,20 +99,17 @@ namespace TensorFlowNET.UnitTest.ops_test { var graph = tf.Graph().as_default(); Operation x=null; - with(graph, g => + x = constant_op.constant(42); + var body = new Func(i => { - x = constant_op.constant(42); - var body = new Func(i => - { - ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, - new Operation[0]); - var new_ops = g._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return i; - }); - // TODO: port control_flow_ops.while_loop - //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); + ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, + new Operation[0]); + var new_ops = graph._add_new_tf_operations(); + self.assertEqual(len(new_ops), 1); + return i; }); + // TODO: port control_flow_ops.while_loop + //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); var op = graph.get_operation_by_name("myloop/myop"); self.assertIsNotNone(op); self.assertEqual(op.name, "myloop/myop"); From 25a79c19f79ebbb7ab4bdf1f2a82c5744605a3ff Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Fri, 2 Aug 2019 22:38:20 -0500 Subject: [PATCH 07/13] Downgrade protobuf to 3.5.1. --- src/TensorFlowNET.Core/TensorFlowNET.Core.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj index 007eccc8..3037221c 100644 --- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj +++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj @@ -68,7 +68,7 @@ Docs: https://tensorflownet.readthedocs.io - + From 3d085c4e739b039d8e2c356f85913446fd90adee Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 3 Aug 2019 01:01:33 -0500 Subject: [PATCH 08/13] exception when OpType handle is zero. --- src/TensorFlowNET.Core/Operations/Operation.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 8c4ce606..d7590b97 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -56,8 +56,8 @@ namespace Tensorflow public TF_DataType dtype => TF_DataType.DtInvalid; public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); - public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle)); - public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle)); + public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); + public string Device => _handle == IntPtr.Zero ? 
null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); private NodeDef _node_def; public NodeDef node_def From de696fb341863f96e37e6c7afcde2cf3df5bd20d Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 3 Aug 2019 02:39:54 -0500 Subject: [PATCH 09/13] new operation always in current graph. --- src/TensorFlowNET.Core/Graphs/Graph.Import.cs | 12 ++++++++---- src/TensorFlowNET.Core/Graphs/Graph.Operation.cs | 8 +++++++- src/TensorFlowNET.Core/TensorFlowNET.Core.csproj | 6 +++--- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs index af7ebfd1..1c91868b 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs @@ -59,12 +59,16 @@ namespace Tensorflow return status; } + static object locker = new object(); public static Graph ImportFromPB(string file_path, string name = null) { - var graph = tf.Graph().as_default(); - var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path)); - importer.import_graph_def(graph_def, name: name); - return graph; + lock (locker) + { + var graph = tf.Graph().as_default(); + var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path)); + importer.import_graph_def(graph_def, name: name); + return graph; + } } } } diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs index 09e09573..1030922b 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs @@ -55,7 +55,13 @@ namespace Tensorflow public Operation OperationByName(string operName) { - return c_api.TF_GraphOperationByName(_handle, operName); + var handle = c_api.TF_GraphOperationByName(_handle, operName); + if(graph_key != tf.get_default_graph().graph_key) + { + Console.WriteLine($"Current graph is not default graph."); + // throw new ValueError($"Current graph is not default graph."); + } + return new Operation(handle, g: this); } public ITensorOrOperation[] get_operations() diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj index 3037221c..4c2b1650 100644 --- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj +++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj @@ -5,7 +5,7 @@ TensorFlow.NET Tensorflow 1.14.0 - 0.10.8 + 0.10.9 Haiping Chen, Meinrad Recheis SciSharp STACK true @@ -17,7 +17,7 @@ TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C# Google's TensorFlow full binding in .NET Standard. Docs: https://tensorflownet.readthedocs.io - 0.10.8.0 + 0.10.9.0 Changes since v0.9.0: 1. Added full connected Convolution Neural Network example. @@ -37,7 +37,7 @@ Docs: https://tensorflownet.readthedocs.io 15. Fix Tensor memory leak. 16. Rename with to tf_with that is only used to build graph purpose. 7.2 - 0.10.8.0 + 0.10.9.0 LICENSE true true From 31909cd802c03ef9ff4512cb3e65a3ae8241ce4e Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 3 Aug 2019 14:23:18 -0500 Subject: [PATCH 10/13] Change Graph.Import interface.
--- .../Framework/importer.py.cs | 19 ++++---- src/TensorFlowNET.Core/Graphs/Graph.Import.cs | 46 +++++++++---------- .../Graphs/ImportGraphDefOptions.cs | 10 ++-- .../TensorFlowNET.Core.csproj | 6 +-- .../ImageRecognitionInception.cs | 2 +- .../ImageProcessing/InceptionArchGoogLeNet.cs | 3 +- .../ImageProcessing/RetrainImageClassifier.cs | 6 ++- 7 files changed, 45 insertions(+), 47 deletions(-) diff --git a/src/TensorFlowNET.Core/Framework/importer.py.cs b/src/TensorFlowNET.Core/Framework/importer.py.cs index 0c405be9..254fda19 100644 --- a/src/TensorFlowNET.Core/Framework/importer.py.cs +++ b/src/TensorFlowNET.Core/Framework/importer.py.cs @@ -54,16 +54,17 @@ namespace Tensorflow input_map = _ConvertInputMapValues(name, input_map); }); - var scoped_options = c_api_util.ScopedTFImportGraphDefOptions(); - _PopulateTFImportGraphDefOptions(scoped_options, prefix, input_map, return_elements); - var bytes = graph_def.ToByteString().ToArray(); - IntPtr buffer = c_api_util.tf_buffer(bytes); - - var status = new Status(); - // need to create a class ImportGraphDefWithResults with IDisposal - var results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status); - status.Check(true); + using (var buffer = c_api_util.tf_buffer(bytes)) + using (var scoped_options = c_api_util.ScopedTFImportGraphDefOptions()) + using (var status = new Status()) + { + _PopulateTFImportGraphDefOptions(scoped_options, prefix, input_map, return_elements); + // need to create a class ImportGraphDefWithResults with IDisposal + var results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status); + status.Check(true); + c_api.TF_DeleteImportGraphDefResults(results); + } _ProcessNewOps(graph); diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs index 1c91868b..82695527 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs @@ -23,6 +23,7 @@ namespace Tensorflow { public unsafe TF_Output[] ImportGraphDefWithReturnOutputs(Buffer graph_def, ImportGraphDefOptions opts, Status s) { + as_default(); var num_return_outputs = opts.NumReturnOutputs; var return_outputs = new TF_Output[num_return_outputs]; int size = Marshal.SizeOf(); @@ -35,40 +36,37 @@ namespace Tensorflow return_outputs[i] = Marshal.PtrToStructure(handle); } + Marshal.FreeHGlobal(return_output_handle); + return return_outputs; } - public Status Import(string file_path) + public bool Import(string file_path, string prefix = "") { var bytes = File.ReadAllBytes(file_path); - var graph_def = new Tensorflow.Buffer(bytes); - var opts = c_api.TF_NewImportGraphDefOptions(); - var status = new Status(); - c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); - return status; - } - - public Status Import(byte[] bytes, string prefix = "") - { - var graph_def = new Tensorflow.Buffer(bytes); - var opts = c_api.TF_NewImportGraphDefOptions(); - c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix); - var status = new Status(); - c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); - c_api.TF_DeleteImportGraphDefOptions(opts); - return status; + return Import(bytes, prefix: prefix); } - static object locker = new object(); - public static Graph ImportFromPB(string file_path, string name = null) + public bool Import(byte[] bytes, string prefix = "") { - lock (locker) + using (var opts = new ImportGraphDefOptions()) + using (var status = new Status()) + using (var graph_def = new Buffer(bytes)) { - var graph 
= tf.Graph().as_default(); - var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path)); - importer.import_graph_def(graph_def, name: name); - return graph; + as_default(); + c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix); + c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); + status.Check(true); + return status.Code == TF_Code.TF_OK; } } + + /*public Graph Import(string file_path, string name = null) + { + as_default(); + var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path)); + importer.import_graph_def(graph_def, name: name); + return this; + }*/ } } diff --git a/src/TensorFlowNET.Core/Graphs/ImportGraphDefOptions.cs b/src/TensorFlowNET.Core/Graphs/ImportGraphDefOptions.cs index 6a0a812a..97720206 100644 --- a/src/TensorFlowNET.Core/Graphs/ImportGraphDefOptions.cs +++ b/src/TensorFlowNET.Core/Graphs/ImportGraphDefOptions.cs @@ -18,10 +18,8 @@ using System; namespace Tensorflow { - public class ImportGraphDefOptions : IDisposable + public class ImportGraphDefOptions : DisposableObject { - private IntPtr _handle; - public int NumReturnOutputs => c_api.TF_ImportGraphDefOptionsNumReturnOutputs(_handle); public ImportGraphDefOptions() @@ -39,10 +37,8 @@ namespace Tensorflow c_api.TF_ImportGraphDefOptionsAddReturnOutput(_handle, name, index); } - public void Dispose() - { - c_api.TF_DeleteImportGraphDefOptions(_handle); - } + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteImportGraphDefOptions(handle); public static implicit operator IntPtr(ImportGraphDefOptions opts) => opts._handle; public static implicit operator ImportGraphDefOptions(IntPtr handle) => new ImportGraphDefOptions(handle); diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj index 4c2b1650..2afe9057 100644 --- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj +++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj @@ -5,7 +5,7 @@ TensorFlow.NET Tensorflow 1.14.0 - 0.10.9 + 0.10.10 Haiping Chen, Meinrad Recheis SciSharp STACK true @@ -17,7 +17,7 @@ TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C# Google's TensorFlow full binding in .NET Standard. Docs: https://tensorflownet.readthedocs.io - 0.10.9.0 + 0.10.10.0 Changes since v0.9.0: 1. Added full connected Convolution Neural Network example. @@ -37,7 +37,7 @@ Docs: https://tensorflownet.readthedocs.io 15. Fix Tensor memory leak. 16. Rename with to tf_with that is only used to build graph purpose. 
7.2 - 0.10.9.0 + 0.10.10.0 LICENSE true true diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs index efcb0b73..548c84f4 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs @@ -31,7 +31,7 @@ namespace TensorFlowNET.Examples { PrepareData(); - var graph = new Graph().as_default(); + var graph = new Graph(); //import GraphDef from pb file graph.Import(Path.Join(dir, pbFile)); diff --git a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs index f51833d2..93fa9c2c 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs @@ -41,7 +41,8 @@ namespace TensorFlowNET.Examples input_mean: input_mean, input_std: input_std); - var graph = Graph.ImportFromPB(Path.Join(dir, pbFile)); + var graph = new Graph(); + graph.Import(Path.Join(dir, pbFile)); var input_operation = graph.get_operation_by_name(input_name); var output_operation = graph.get_operation_by_name(output_name); diff --git a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs index becd9f7e..7adb249b 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs @@ -738,7 +738,8 @@ namespace TensorFlowNET.Examples var fileBytes = ReadTensorFromImageFile(img_path); // import graph and variables - var graph = Graph.ImportFromPB(output_graph, ""); + var graph = new Graph(); + graph.Import(output_graph, ""); Tensor input = graph.OperationByName("Placeholder"); Tensor output = graph.OperationByName("final_result"); @@ -778,7 +779,8 @@ namespace TensorFlowNET.Examples if (!File.Exists(output_graph)) return; - var graph = Graph.ImportFromPB(output_graph); + var graph = new Graph(); + graph.Import(output_graph); var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); tf_with(tf.Session(graph), sess => From fa2cc20fc2fb05d5db6d183dd2c1b49d855c5c4b Mon Sep 17 00:00:00 2001 From: Kerry Jiang Date: Sat, 3 Aug 2019 16:55:46 -0700 Subject: [PATCH 11/13] removed the direct console output for now, will add logging later --- src/TensorFlowHub/Utils.cs | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/src/TensorFlowHub/Utils.cs b/src/TensorFlowHub/Utils.cs index 72ee9430..3245071f 100644 --- a/src/TensorFlowHub/Utils.cs +++ b/src/TensorFlowHub/Utils.cs @@ -28,22 +28,14 @@ namespace Tensorflow.Hub var fileSaveTo = Path.Combine(dirSaveTo, fileName); if (File.Exists(fileSaveTo)) + return; + + Directory.CreateDirectory(dirSaveTo); + + using (var wc = new WebClient()) { - //TODO:maybe you can check file's hashcode and "donglowad.info" to complete file ... 
- Console.WriteLine($"{fileSaveTo} already exists."); + await wc.DownloadFileTaskAsync(url, fileSaveTo); } - else - { - if (!Directory.Exists(dirSaveTo)) - Directory.CreateDirectory(dirSaveTo); - - using (var wc = new WebClient()) - { - await wc.DownloadFileTaskAsync(url, fileSaveTo); - } - - } - } public static async Task UnzipAsync(this IModelLoader modelLoader, string zipFile, string saveTo) @@ -52,8 +44,7 @@ namespace Tensorflow.Hub if (!Path.IsPathRooted(saveTo)) saveTo = Path.Combine(AppContext.BaseDirectory, saveTo); - if (!Directory.Exists(saveTo)) - Directory.CreateDirectory(saveTo); + Directory.CreateDirectory(saveTo); if (!Path.IsPathRooted(zipFile)) zipFile = Path.Combine(AppContext.BaseDirectory, zipFile); From bad9aba49ee7a0c39cfc76723cd6473a318895ca Mon Sep 17 00:00:00 2001 From: Antonio Cifonelli Date: Mon, 5 Aug 2019 23:07:26 +0200 Subject: [PATCH 12/13] Adding `logical_not` operator (#343) Relative unit test in `OperationTest`. --- src/TensorFlowNET.Core/APIs/tf.math.cs | 3 +++ src/TensorFlowNET.Core/Operations/gen_math_ops.cs | 7 +++++++ test/TensorFlowNET.UnitTest/OperationsTest.cs | 11 ++++++++++- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index fb65d31b..b787bf1d 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -192,6 +192,9 @@ namespace Tensorflow public static Tensor logical_and(Tensor x, Tensor y, string name = null) => gen_math_ops.logical_and(x, y, name); + public static Tensor logical_not(Tensor x, string name = null) + => gen_math_ops.logical_not(x, name); + /// /// Clips tensor values to a specified min and max. /// diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index a8b9ac49..c3b30d8f 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -357,6 +357,13 @@ namespace Tensorflow return _op.outputs[0]; } + public static Tensor logical_not(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x }); + + return _op.outputs[0]; + } + public static Tensor squared_difference(Tensor x, Tensor y, string name = null) { var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); diff --git a/test/TensorFlowNET.UnitTest/OperationsTest.cs b/test/TensorFlowNET.UnitTest/OperationsTest.cs index 10046f0c..68c44831 100644 --- a/test/TensorFlowNET.UnitTest/OperationsTest.cs +++ b/test/TensorFlowNET.UnitTest/OperationsTest.cs @@ -131,7 +131,7 @@ namespace TensorFlowNET.UnitTest } [TestMethod] - public void logicalAndTest() + public void logicalOpsTest() { var a = tf.constant(new[] {1f, 2f, 3f, 4f, -4f, -3f, -2f, -1f}); var b = tf.less(a, 0f); @@ -144,6 +144,15 @@ namespace TensorFlowNET.UnitTest var o = sess.run(d); Assert.IsTrue(o.array_equal(check)); } + + d = tf.cast(tf.logical_not(b), tf.int32); + check = np.array(new[] { 1, 1, 1, 1, 0, 0, 0, 0 }); + + using (var sess = tf.Session()) + { + var o = sess.run(d); + Assert.IsTrue(o.array_equal(check)); + } } [TestMethod] From 8a50673baad5581022b321b3628b9cd044facef6 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Mon, 5 Aug 2019 07:28:19 -0500 Subject: [PATCH 13/13] Make Graph inherit from DisposableObject.
--- src/TensorFlowHub/TensorFlowHub.csproj | 4 +- .../Graphs/DefaultGraphStack.cs | 11 ++++- src/TensorFlowNET.Core/Graphs/Graph.cs | 40 +++++++++--------- .../Operations/Operation.cs | 2 +- .../Sessions/BaseSession.cs | 5 +-- src/TensorFlowNET.Core/Sessions/Session.cs | 4 +- .../ImageProcessing/RetrainImageClassifier.cs | 41 ++++++++----------- test/TensorFlowNET.UnitTest/NameScopeTest.cs | 5 ++- 8 files changed, 57 insertions(+), 55 deletions(-) diff --git a/src/TensorFlowHub/TensorFlowHub.csproj b/src/TensorFlowHub/TensorFlowHub.csproj index b945c373..0ad9714f 100644 --- a/src/TensorFlowHub/TensorFlowHub.csproj +++ b/src/TensorFlowHub/TensorFlowHub.csproj @@ -2,7 +2,7 @@ Tensorflow.Hub netstandard2.0 - 0.0.1 + 0.0.2 Kerry Jiang SciSharp STACK Apache 2.0 @@ -13,7 +13,7 @@ TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models. SciSharp.TensorFlowHub true - 1. Add MNIST loader. + https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 diff --git a/src/TensorFlowNET.Core/Graphs/DefaultGraphStack.cs b/src/TensorFlowNET.Core/Graphs/DefaultGraphStack.cs index 9e6f6346..f066676d 100644 --- a/src/TensorFlowNET.Core/Graphs/DefaultGraphStack.cs +++ b/src/TensorFlowNET.Core/Graphs/DefaultGraphStack.cs @@ -34,10 +34,17 @@ namespace Tensorflow public Graph get_controller() { - if (stack.Count == 0) + if (stack.Count(x => x.IsDefault) == 0) stack.Add(new StackModel { Graph = tf.Graph(), IsDefault = true }); - return stack.First(x => x.IsDefault).Graph; + return stack.Last(x => x.IsDefault).Graph; + } + + public bool remove(Graph g) + { + var sm = stack.FirstOrDefault(x => x.Graph == g); + if (sm == null) return false; + return stack.Remove(sm); } public void reset() diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index 7121e0be..2949b5d4 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -73,9 +73,8 @@ namespace Tensorflow all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. 
*/ - public partial class Graph : IPython, IDisposable, IEnumerable + public partial class Graph : DisposableObject, IEnumerable { - private IntPtr _handle; private Dictionary _nodes_by_id; public Dictionary _nodes_by_name; private Dictionary _names_in_use; @@ -121,10 +120,6 @@ namespace Tensorflow _graph_key = $"grap-key-{ops.uid()}/"; } - public void __enter__() - { - } - public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) { return _as_graph_element_locked(obj, allow_tensor, allow_operation); @@ -443,14 +438,15 @@ namespace Tensorflow _unfetchable_ops.Add(op); } - public void Dispose() - { - /*if (_handle != IntPtr.Zero) - c_api.TF_DeleteGraph(_handle); - - _handle = IntPtr.Zero; - - GC.SuppressFinalize(this);*/ + protected override void DisposeManagedState() + { + ops.default_graph_stack.remove(this); + } + + protected override void DisposeUnManagedState(IntPtr handle) + { + Console.WriteLine($"Destroy graph {handle}"); + c_api.TF_DeleteGraph(handle); } /// @@ -481,17 +477,19 @@ namespace Tensorflow return new TensorShape(dims.Select(x => (int)x).ToArray()); } + string debugString = string.Empty; public override string ToString() { - int len = 0; - return c_api.TF_GraphDebugString(_handle, out len); + return $"{graph_key}, ({_handle})"; + /*if (string.IsNullOrEmpty(debugString)) + { + int len = 0; + debugString = c_api.TF_GraphDebugString(_handle, out len); + } + + return debugString;*/ } - public void __exit__() - { - - } - private IEnumerable GetEnumerable() => c_api_util.tf_operations(this); diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index d7590b97..059290f4 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -84,7 +84,7 @@ namespace Tensorflow // Dict mapping op name to file and line information for op colocation // context managers. - _control_flow_context = graph._get_control_flow_context(); + _control_flow_context = _graph._get_control_flow_context(); // Note: _control_flow_post_processing() must not be called here, the caller is responsible for calling it when using this constructor. } diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index fa3947f5..4eacc7bf 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -31,7 +31,6 @@ namespace Tensorflow protected bool _closed; protected int _current_version; protected byte[] _target; - protected IntPtr _session; public Graph graph => _graph; public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) @@ -46,7 +45,7 @@ namespace Tensorflow var status = new Status(); - _session = c_api.TF_NewSession(_graph, opts ?? newOpts, status); + _handle = c_api.TF_NewSession(_graph, opts ?? 
newOpts, status); status.Check(true); } @@ -212,7 +211,7 @@ namespace Tensorflow var output_values = fetch_list.Select(x => IntPtr.Zero).ToArray(); - c_api.TF_SessionRun(_session, + c_api.TF_SessionRun(_handle, run_options: null, inputs: feed_dict.Select(f => f.Key).ToArray(), input_values: feed_dict.Select(f => (IntPtr)f.Value).ToArray(), diff --git a/src/TensorFlowNET.Core/Sessions/Session.cs b/src/TensorFlowNET.Core/Sessions/Session.cs index 36797ec7..6b238ae7 100644 --- a/src/TensorFlowNET.Core/Sessions/Session.cs +++ b/src/TensorFlowNET.Core/Sessions/Session.cs @@ -30,7 +30,7 @@ namespace Tensorflow public Session(IntPtr handle, Graph g = null) : base("", g, null) { - _session = handle; + _handle = handle; } public Session(Graph g, SessionOptions opts = null, Status s = null) @@ -73,7 +73,7 @@ namespace Tensorflow return new Session(sess, g: new Graph(graph).as_default()); } - public static implicit operator IntPtr(Session session) => session._session; + public static implicit operator IntPtr(Session session) => session._handle; public static implicit operator Session(IntPtr handle) => new Session(handle); public void __enter__() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs index 7adb249b..03ff572a 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs @@ -52,7 +52,8 @@ namespace TensorFlowNET.Examples // The location where variable checkpoints will be stored. string CHECKPOINT_NAME = Path.Join(data_dir, "_retrain_checkpoint"); string tfhub_module = "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3"; - string final_tensor_name = "final_result"; + string input_tensor_name = "Placeholder"; + string final_tensor_name = "Score"; float testing_percentage = 0.1f; float validation_percentage = 0.1f; float learning_rate = 0.01f; @@ -81,13 +82,13 @@ namespace TensorFlowNET.Examples PrepareData(); #region For debug purpose - + // predict images // Predict(null); // load saved pb and test new images. // Test(null); - + #endregion var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); @@ -276,16 +277,13 @@ namespace TensorFlowNET.Examples private (Graph, Tensor, Tensor, bool) create_module_graph() { var (height, width) = (299, 299); - - return tf_with(tf.Graph().as_default(), graph => - { - tf.train.import_meta_graph("graph/InceptionV3.meta"); - Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); - // var m = hub.Module(module_spec); - Tensor bottleneck_tensor = graph.OperationByName("module_apply_default/hub_output/feature_vector/SpatialSqueeze");// m(resized_input_tensor); - var wants_quantization = false; - return (graph, bottleneck_tensor, resized_input_tensor, wants_quantization); - }); + var graph = tf.Graph().as_default(); + tf.train.import_meta_graph("graph/InceptionV3.meta"); + Tensor resized_input_tensor = graph.OperationByName(input_tensor_name); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); + // var m = hub.Module(module_spec); + Tensor bottleneck_tensor = graph.OperationByName("module_apply_default/hub_output/feature_vector/SpatialSqueeze");// m(resized_input_tensor); + var wants_quantization = false; + return (graph, bottleneck_tensor, resized_input_tensor, wants_quantization); } private (NDArray, long[], string[]) get_random_cached_bottlenecks(Session sess, Dictionary> image_lists, @@ -594,13 +592,10 @@ namespace TensorFlowNET.Examples create_module_graph(); // Add the new layer that we'll be training. - tf_with(graph.as_default(), delegate - { - (train_step, cross_entropy, bottleneck_input, - ground_truth_input, final_tensor) = add_final_retrain_ops( - class_count, final_tensor_name, bottleneck_tensor, - wants_quantization, is_training: true); - }); + (train_step, cross_entropy, bottleneck_input, + ground_truth_input, final_tensor) = add_final_retrain_ops( + class_count, final_tensor_name, bottleneck_tensor, + wants_quantization, is_training: true); return graph; } @@ -734,15 +729,15 @@ namespace TensorFlowNET.Examples var labels = File.ReadAllLines(output_labels); // predict image - var img_path = Path.Join(image_dir, "roses", "12240303_80d87f77a3_n.jpg"); + var img_path = Path.Join(image_dir, "daisy", "5547758_eea9edfd54_n.jpg"); var fileBytes = ReadTensorFromImageFile(img_path); // import graph and variables var graph = new Graph(); graph.Import(output_graph, ""); - Tensor input = graph.OperationByName("Placeholder"); - Tensor output = graph.OperationByName("final_result"); + Tensor input = graph.OperationByName(input_tensor_name); + Tensor output = graph.OperationByName(final_tensor_name); using (var sess = tf.Session(graph)) { diff --git a/test/TensorFlowNET.UnitTest/NameScopeTest.cs b/test/TensorFlowNET.UnitTest/NameScopeTest.cs index 39401fe0..9a2b4346 100644 --- a/test/TensorFlowNET.UnitTest/NameScopeTest.cs +++ b/test/TensorFlowNET.UnitTest/NameScopeTest.cs @@ -7,12 +7,13 @@ namespace TensorFlowNET.UnitTest [TestClass] public class NameScopeTest { - Graph g = ops.get_default_graph(); string name = ""; [TestMethod] public void NestedNameScope() { + Graph g = tf.Graph().as_default(); + tf_with(new ops.NameScope("scope1"), scope1 => { name = scope1; @@ -37,6 +38,8 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual("scope1/Const_1:0", const3.name); }); + g.Dispose(); + Assert.AreEqual("", g._name_stack); } }