diff --git a/.gitignore b/.gitignore index eee1dc7b..ce600fbb 100644 --- a/.gitignore +++ b/.gitignore @@ -332,3 +332,7 @@ src/TensorFlowNET.Native/bazel-* src/TensorFlowNET.Native/c_api.h /.vscode test/TensorFlowNET.Examples/mnist + + +# training model resources +.resources diff --git a/README.md b/README.md index 7f7d14a4..9cf23da2 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,14 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr Install TF.NET and TensorFlow binary through NuGet. ```sh +### install tensorflow C# binding PM> Install-Package TensorFlow.NET + +### Install tensorflow binary +### For CPU version PM> Install-Package SciSharp.TensorFlow.Redist +### For GPU version (CUDA and cuDNN are required) +PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU ``` Import TF.NET. diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 5d6e5fe7..689965c4 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -17,7 +17,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\ EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowDatasets", "src\TensorFlowDatasets\TensorFlowDatasets.csproj", "{DF151A51-E9FD-41BD-B0F4-08A743755D44}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples.GPU", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.GPU.csproj", "{6F6B3382-8F87-4CD9-BF87-C81D5405685A}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) 
= preSolution @@ -57,6 +61,14 @@ Global {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU + {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.Build.0 = Debug|Any CPU + {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.ActiveCfg = Release|Any CPU + {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.Build.0 = Release|Any CPU + {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/src/KerasNET.Core/Model.cs b/src/KerasNET.Core/Model.cs index b1e6de57..d1d05159 100644 --- a/src/KerasNET.Core/Model.cs +++ b/src/KerasNET.Core/Model.cs @@ -115,7 +115,7 @@ namespace Keras var init = tf.global_variables_initializer(); float loss_value = 0; - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -133,7 +133,7 @@ namespace Keras Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } diff --git a/src/SciSharp.TensorFlow.Redist/README.md b/src/SciSharp.TensorFlow.Redist/README.md index 3f75c4cf..5bdf82a1 100644 --- a/src/SciSharp.TensorFlow.Redist/README.md +++ b/src/SciSharp.TensorFlow.Redist/README.md @@ -1,8 +1,14 @@ ## SciSharp.TensorFlow.Redist ## -`SciSharp.TensorFlow.Redist` is a migration from 
[Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.4.0 going forward. +`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 going forward. +* CPU version for all platforms (Windows, Linux, OSX) +```powershell +PM> Install-Package SciSharp.TensorFlow.Redist +``` + +* GPU version for Windows +```powershell +PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU +``` @@ -16,7 +22,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5 On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. 1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. -2. Run `nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` +2. 
Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` diff --git a/src/SciSharp.TensorFlow.Redist/Redist.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec similarity index 89% rename from src/SciSharp.TensorFlow.Redist/Redist.nuspec rename to src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec index d2527c8b..11919e8c 100644 --- a/src/SciSharp.TensorFlow.Redist/Redist.nuspec +++ b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec @@ -9,7 +9,7 @@ LICENSE.txt https://aka.ms/deprecateLicenseUrl https://www.tensorflow.org/ - $packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package. + $packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package. https://github.com/tensorflow/tensorflow/releases/tag/v$version$ Copyright 2019 The TensorFlow Authors. All rights reserved. TensorFlow diff --git a/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec new file mode 100644 index 00000000..f010c96b --- /dev/null +++ b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec @@ -0,0 +1,26 @@ + + + + $packageId$ + $version$ + The TensorFlow Authors + The TensorFlow Authors + true + LICENSE.txt + https://aka.ms/deprecateLicenseUrl + https://www.tensorflow.org/ + $packageId$ contains the TensorFlow C library GPU version $version$ redistributed as a NuGet package. + https://github.com/tensorflow/tensorflow/releases/tag/v$version$ + Copyright 2019 The TensorFlow Authors. All rights reserved. 
+ TensorFlow + + + + + + + + + + + \ No newline at end of file diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj similarity index 99% rename from src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj rename to src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj index a0ca0a0a..6a225ede 100644 --- a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj +++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj @@ -17,7 +17,7 @@ true false - Redist.nuspec + Redist-CPU.nuspec packageId=$(PackageId);version=$(PackageVersion) $(ProjDir) diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj new file mode 100644 index 00000000..08fd9386 --- /dev/null +++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj @@ -0,0 +1,187 @@ + + + + $(MSBuildThisFileDirectory) + $(ProjDir)bin\ + $(ProjDir)obj\ + + x64 + netstandard2.0 + 1.14.0 + 1 + + $(BinDir)packages\ + $(MSBuildProjectName) + $(TensorFlowVersion) + + true + false + + Redist-Windows-GPU.nuspec + packageId=$(PackageId);version=$(PackageVersion) + $(ProjDir) + + CopyFilesFromArchive + + win + linux + osx + $(PackageRid)-$(TargetArchitecture) + + + + + false + + + + + + + + + + + + + + + + + + + + + + + + + + <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" /> + + + + + + + + + + + + + + + + + + + + + + @(FilesWithHashes->'%(FileHash)') + $([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", "")) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" 
Runtime="%(TensorFlowArchive.Runtime)" /> + <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/> + <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" /> + + + <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" /> + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha b/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha new file mode 100644 index 00000000..739129b1 --- /dev/null +++ b/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha @@ -0,0 +1 @@ +850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293 diff --git a/src/TensorFlowDatasets/DatasetBuilder.cs b/src/TensorFlowDatasets/DatasetBuilder.cs new file mode 100644 index 00000000..bfb78d6e --- /dev/null +++ b/src/TensorFlowDatasets/DatasetBuilder.cs @@ -0,0 +1,24 @@ +using System; + +namespace TensorFlowDatasets +{ + /// + /// Abstract base class for all datasets. + /// + public class DatasetBuilder + { + /// + /// Downloads and prepares dataset for reading. + /// + /// + /// directory where downloaded files are stored. + /// + /// + /// further configuration for downloading and preparing dataset. 
+ /// + public void download_and_prepare(string download_dir = null, DownloadConfig download_config = null) + { + + } + } +} diff --git a/src/TensorFlowDatasets/DownloadConfig.cs b/src/TensorFlowDatasets/DownloadConfig.cs new file mode 100644 index 00000000..0488e273 --- /dev/null +++ b/src/TensorFlowDatasets/DownloadConfig.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace TensorFlowDatasets +{ + public class DownloadConfig + { + } +} diff --git a/src/TensorFlowDatasets/TensorFlowDatasets.csproj b/src/TensorFlowDatasets/TensorFlowDatasets.csproj new file mode 100644 index 00000000..1b839c1f --- /dev/null +++ b/src/TensorFlowDatasets/TensorFlowDatasets.csproj @@ -0,0 +1,19 @@ + + + + netcoreapp2.2 + SciSharp.TensorFlowDatasets + 0.0.1 + SciSharp Team + TensorFlow Datasets + true + https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 + http://scisharpstack.org + TensorFlow Datasets provides many public datasets as tf.data.Datasets. 
+ https://github.com/SciSharp/TensorFlow.NET + git + SciSharp, Dataset, TensorFlow + Apache 2.0 + + + diff --git a/src/TensorFlowHub/MnistDataSet.cs b/src/TensorFlowHub/MnistDataSet.cs index e0717ccb..accc57e1 100644 --- a/src/TensorFlowHub/MnistDataSet.cs +++ b/src/TensorFlowHub/MnistDataSet.cs @@ -27,5 +27,54 @@ namespace Tensorflow.Hub labels.astype(dataType); Labels = labels; } + + public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true) + { + var start = IndexInEpoch; + // Shuffle for the first epoch + if(EpochsCompleted == 0 && start == 0 && shuffle) + { + var perm0 = np.arange(NumOfExamples); + np.random.shuffle(perm0); + Data = Data[perm0]; + Labels = Labels[perm0]; + } + + // Go to the next epoch + if (start + batch_size > NumOfExamples) + { + // Finished epoch + EpochsCompleted += 1; + + // Get the rest examples in this epoch + var rest_num_examples = NumOfExamples - start; + //var images_rest_part = _images[np.arange(start, _num_examples)]; + //var labels_rest_part = _labels[np.arange(start, _num_examples)]; + // Shuffle the data + if (shuffle) + { + var perm = np.arange(NumOfExamples); + np.random.shuffle(perm); + Data = Data[perm]; + Labels = Labels[perm]; + } + + start = 0; + IndexInEpoch = batch_size - rest_num_examples; + var end = IndexInEpoch; + var images_new_part = Data[np.arange(start, end)]; + var labels_new_part = Labels[np.arange(start, end)]; + + /*return (np.concatenate(new float[][] { images_rest_part.Data(), images_new_part.Data() }, axis: 0), + np.concatenate(new float[][] { labels_rest_part.Data(), labels_new_part.Data() }, axis: 0));*/ + return (images_new_part, labels_new_part); + } + else + { + IndexInEpoch += batch_size; + var end = IndexInEpoch; + return (Data[np.arange(start, end)], Labels[np.arange(start, end)]); + } + } } } diff --git a/src/TensorFlowHub/MnistModelLoader.cs b/src/TensorFlowHub/MnistModelLoader.cs index 7c4ff109..121c0961 100644 --- 
a/src/TensorFlowHub/MnistModelLoader.cs +++ b/src/TensorFlowHub/MnistModelLoader.cs @@ -15,14 +15,26 @@ namespace Tensorflow.Hub private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; - public static async Task> LoadAsync(string trainDir, bool oneHot = false) + public static async Task> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null) { var loader = new MnistModelLoader(); - return await loader.LoadAsync(new ModelLoadSetting + + var setting = new ModelLoadSetting { TrainDir = trainDir, OneHot = oneHot - }); + }; + + if (trainSize.HasValue) + setting.TrainSize = trainSize.Value; + + if (validationSize.HasValue) + setting.ValidationSize = validationSize.Value; + + if (testSize.HasValue) + setting.TestSize = testSize.Value; + + return await loader.LoadAsync(setting); } public async Task> LoadAsync(ModelLoadSetting setting) @@ -86,7 +98,7 @@ namespace Tensorflow.Hub var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape); var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape); - var test = new MnistDataSet(trainImages, trainLabels, dtype, reshape); + var test = new MnistDataSet(testImages, testLabels, dtype, reshape); return new Datasets(train, validation, test); } diff --git a/src/TensorFlowHub/Utils.cs b/src/TensorFlowHub/Utils.cs index 10aaf958..72ee9430 100644 --- a/src/TensorFlowHub/Utils.cs +++ b/src/TensorFlowHub/Utils.cs @@ -25,13 +25,25 @@ namespace Tensorflow.Hub if (!Path.IsPathRooted(dirSaveTo)) dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo); - if (!Directory.Exists(dirSaveTo)) - Directory.CreateDirectory(dirSaveTo); - - using (var wc = new WebClient()) + var fileSaveTo = Path.Combine(dirSaveTo, fileName); + + if (File.Exists(fileSaveTo)) { - await wc.DownloadFileTaskAsync(url, Path.Combine(dirSaveTo, fileName)); + //TODO:maybe you can check file's 
hashcode and "donglowad.info" to complete file ... + Console.WriteLine($"{fileSaveTo} already exists."); } + else + { + if (!Directory.Exists(dirSaveTo)) + Directory.CreateDirectory(dirSaveTo); + + using (var wc = new WebClient()) + { + await wc.DownloadFileTaskAsync(url, fileSaveTo); + } + + } + } public static async Task UnzipAsync(this IModelLoader modelLoader, string zipFile, string saveTo) @@ -42,7 +54,7 @@ namespace Tensorflow.Hub if (!Directory.Exists(saveTo)) Directory.CreateDirectory(saveTo); - + if (!Path.IsPathRooted(zipFile)) zipFile = Path.Combine(AppContext.BaseDirectory, zipFile); @@ -78,7 +90,7 @@ namespace Tensorflow.Hub var cts = new CancellationTokenSource(); var showProgressTask = ShowProgressInConsole(cts); - + try { await task; @@ -86,7 +98,7 @@ namespace Tensorflow.Hub finally { cts.Cancel(); - } + } } private static async Task ShowProgressInConsole(CancellationTokenSource cts) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 5586840c..fb65d31b 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -189,6 +189,9 @@ namespace Tensorflow public static Tensor log1p(Tensor x, string name = null) => gen_math_ops.log1p(x, name); + public static Tensor logical_and(Tensor x, Tensor y, string name = null) + => gen_math_ops.logical_and(x, y, name); + /// /// Clips tensor values to a specified min and max. 
/// diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 0bc9d0e5..67efe726 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -136,7 +136,7 @@ namespace Tensorflow public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null) { - return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => + return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); @@ -169,7 +169,7 @@ namespace Tensorflow /// public static Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null) { - with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope => + tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope => { name = scope; labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient"); diff --git a/src/TensorFlowNET.Core/Buffers/Buffer.cs b/src/TensorFlowNET.Core/Buffers/Buffer.cs index 0b73265d..dbe576b8 100644 --- a/src/TensorFlowNET.Core/Buffers/Buffer.cs +++ b/src/TensorFlowNET.Core/Buffers/Buffer.cs @@ -19,10 +19,8 @@ using System.Runtime.InteropServices; namespace Tensorflow { - public class Buffer : IDisposable + public class Buffer : DisposableObject { - private IntPtr _handle; - private TF_Buffer buffer => Marshal.PtrToStructure(_handle); public byte[] Data @@ -30,8 +28,8 @@ namespace Tensorflow get { var data = new byte[buffer.length]; - if (buffer.length > 0) - Marshal.Copy(buffer.data, data, 0, (int)buffer.length); + if (data.Length > 0) + Marshal.Copy(buffer.data, data, 0, data.Length); return data; } } @@ -54,6 +52,8 @@ namespace Tensorflow Marshal.Copy(data, 0, dst, data.Length); _handle = c_api.TF_NewBufferFromString(dst, (ulong)data.Length); + + 
Marshal.FreeHGlobal(dst); } public static implicit operator IntPtr(Buffer buffer) @@ -66,9 +66,7 @@ namespace Tensorflow return buffer.Data; } - public void Dispose() - { - c_api.TF_DeleteBuffer(_handle); - } + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteBuffer(handle); } } diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs index a6253520..8112708f 100644 --- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs +++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs @@ -68,7 +68,7 @@ namespace Tensorflow.Clustering private Tensor _initialize() { - return with(ops.control_dependencies(new Operation[] + return tf_with(ops.control_dependencies(new Operation[] { check_ops.assert_positive(_num_remaining) }), delegate diff --git a/src/TensorFlowNET.Core/DisposableObject.cs b/src/TensorFlowNET.Core/DisposableObject.cs new file mode 100644 index 00000000..7e416e6d --- /dev/null +++ b/src/TensorFlowNET.Core/DisposableObject.cs @@ -0,0 +1,77 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+******************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow +{ + /// + /// Abstract class for disposable object allocated in unmanaged runtime. + /// + public abstract class DisposableObject : IDisposable + { + protected IntPtr _handle; + + protected DisposableObject() { } + + public DisposableObject(IntPtr handle) + { + _handle = handle; + } + + protected virtual void DisposeManagedState() + { + } + + protected abstract void DisposeUnManagedState(IntPtr handle); + + protected virtual void Dispose(bool disposing) + { + if (disposing) + { + // free unmanaged resources (unmanaged objects) and override a finalizer below. + if (_handle != IntPtr.Zero) + { + // dispose managed state (managed objects). + DisposeManagedState(); + + // set large fields to null. + DisposeUnManagedState(_handle); + + _handle = IntPtr.Zero; + } + } + } + + // override a finalizer only if Dispose(bool disposing) above has code to free unmanaged resources. + ~DisposableObject() + { + // Do not change this code. Put cleanup code in Dispose(bool disposing) above. + Dispose(false); + } + + // This code added to correctly implement the disposable pattern. + public void Dispose() + { + // Do not change this code. Put cleanup code in Dispose(bool disposing) above. + Dispose(true); + // uncomment the following line if the finalizer is overridden above. 
+ GC.SuppressFinalize(this); + } + } +} diff --git a/src/TensorFlowNET.Core/Framework/c_api_util.cs b/src/TensorFlowNET.Core/Framework/c_api_util.cs index 440cbf44..5d5cb9b3 100644 --- a/src/TensorFlowNET.Core/Framework/c_api_util.cs +++ b/src/TensorFlowNET.Core/Framework/c_api_util.cs @@ -128,7 +128,7 @@ namespace Tensorflow IntPtr c_op; while ((c_op = c_api.TF_GraphNextOperation(graph, ref pos)) != IntPtr.Zero) { - yield return c_op; + yield return new Operation(c_op, graph); } } } diff --git a/src/TensorFlowNET.Core/Framework/importer.py.cs b/src/TensorFlowNET.Core/Framework/importer.py.cs index 577d41aa..0c405be9 100644 --- a/src/TensorFlowNET.Core/Framework/importer.py.cs +++ b/src/TensorFlowNET.Core/Framework/importer.py.cs @@ -42,7 +42,7 @@ namespace Tensorflow string prefix = ""; var graph = ops.get_default_graph(); - with(ops.name_scope(name, "import", input_map.Values), scope => + tf_with(ops.name_scope(name, "import", input_map.Values), scope => { prefix = scope; /*if (!string.IsNullOrEmpty(prefix)) diff --git a/src/TensorFlowNET.Core/Gradients/gradients_util.cs b/src/TensorFlowNET.Core/Gradients/gradients_util.cs index 95f083da..43247fa4 100644 --- a/src/TensorFlowNET.Core/Gradients/gradients_util.cs +++ b/src/TensorFlowNET.Core/Gradients/gradients_util.cs @@ -55,7 +55,7 @@ namespace Tensorflow **/ var grads = new Dictionary>>(); - with(ops.name_scope(name, "gradients", + tf_with(ops.name_scope(name, "gradients", values: ys.Concat(xs).Concat(stop_gradients).Concat(grad_ys)), scope => { string grad_scope = scope; @@ -141,7 +141,7 @@ namespace Tensorflow } } - with(ops.name_scope(op.name + "_grad"), scope1 => + tf_with(ops.name_scope(op.name + "_grad"), scope1 => { string name1 = scope1; if (grad_fn != null) diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index a84185f3..a5ac79ba 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -90,7 
+90,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var y = op.outputs[0]; // y = e^x - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { y = math_ops.conj(y); return new Tensor[] { math_ops.mul_no_nan(y, grad) }; }); @@ -107,7 +107,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { x = math_ops.conj(x); return new Tensor[] { grad * math_ops.digamma(x) }; }); @@ -118,7 +118,7 @@ namespace Tensorflow.Gradients { var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(new Operation[] { grad }), dp => { + return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { x = math_ops.conj(x); return new Tensor[] { grad * math_ops.reciprocal(x) }; }); @@ -431,7 +431,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var y = op.outputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { y = math_ops.conj(y); return new Tensor[] { gen_math_ops.sigmoid_grad(y, grad) }; @@ -453,7 +453,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var x = op.inputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { x = math_ops.conj(x); var y = constant_op.constant(2.0f, dtype: x.dtype); @@ -467,7 +467,7 @@ namespace Tensorflow.Gradients var grad = grads[0]; var y = op.outputs[0]; - return with(ops.control_dependencies(grads), delegate + return tf_with(ops.control_dependencies(grads), delegate { y = math_ops.conj(y); return new Tensor[] { gen_math_ops.tanh_grad(y, grad) }; diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs index 67b93191..17828c73 100644 --- 
a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs @@ -22,7 +22,7 @@ namespace Tensorflow { var buffer = new Buffer(); c_api.TF_GraphToGraphDef(_handle, buffer, s); - s.Check(); + s.Check(true); // var def = GraphDef.Parser.ParseFrom(buffer); // buffer.Dispose(); @@ -31,8 +31,11 @@ namespace Tensorflow private GraphDef _as_graph_def(bool add_shapes = false) { - var buffer = ToGraphDef(Status); - Status.Check(); + var status = new Status(); + var buffer = ToGraphDef(status); + status.Check(true); + status.Dispose(); + var def = GraphDef.Parser.ParseFrom(buffer); buffer.Dispose(); diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs index 7fcfdbd7..af7ebfd1 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs @@ -43,16 +43,20 @@ namespace Tensorflow var bytes = File.ReadAllBytes(file_path); var graph_def = new Tensorflow.Buffer(bytes); var opts = c_api.TF_NewImportGraphDefOptions(); - c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status); - return Status; + var status = new Status(); + c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); + return status; } - public Status Import(byte[] bytes) + public Status Import(byte[] bytes, string prefix = "") { var graph_def = new Tensorflow.Buffer(bytes); var opts = c_api.TF_NewImportGraphDefOptions(); - c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status); - return Status; + c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix); + var status = new Status(); + c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); + c_api.TF_DeleteImportGraphDefOptions(opts); + return status; } public static Graph ImportFromPB(string file_path, string name = null) diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs index 06b65f03..09e09573 100644 --- 
a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs @@ -38,6 +38,31 @@ namespace Tensorflow return c_api.TF_NewOperation(_handle, opType, opName); } + public unsafe Operation[] ReturnOperations(IntPtr results) + { + TF_Operation return_oper_handle = new TF_Operation(); + int num_return_opers = 0; + c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle); + Operation[] return_opers = new Operation[num_return_opers]; + for (int i = 0; i < num_return_opers; i++) + { + var handle = return_oper_handle.node + Marshal.SizeOf() * i; + return_opers[i] = new Operation(*(IntPtr*)handle); + } + + return return_opers; + } + + public Operation OperationByName(string operName) + { + return c_api.TF_GraphOperationByName(_handle, operName); + } + + public ITensorOrOperation[] get_operations() + { + return _nodes_by_name.Values.Select(x => x).ToArray(); + } + /// /// Returns the `Operation` with the given `name`. /// diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index 82e83df1..7121e0be 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -15,6 +15,7 @@ ******************************************************************************/ using System; +using System.Collections; using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; @@ -72,7 +73,7 @@ namespace Tensorflow all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. 
*/ - public partial class Graph : IPython, IDisposable + public partial class Graph : IPython, IDisposable, IEnumerable { private IntPtr _handle; private Dictionary _nodes_by_id; @@ -87,8 +88,7 @@ namespace Tensorflow private string _graph_key; public string graph_key => _graph_key; public string _last_loss_reduction; - public bool _is_loss_scaled_by_optimizer { get; set; } - public Status Status { get; } + public bool _is_loss_scaled_by_optimizer { get; set; } /// /// True if the graph is considered "finalized". In that case no @@ -106,7 +106,6 @@ namespace Tensorflow public Graph() { _handle = c_api.TF_NewGraph(); - Status = new Status(); _nodes_by_id = new Dictionary(); _nodes_by_name = new Dictionary(); _names_in_use = new Dictionary(); @@ -116,11 +115,14 @@ namespace Tensorflow public Graph(IntPtr handle) { _handle = handle; - Status = new Status(); _nodes_by_id = new Dictionary(); _nodes_by_name = new Dictionary(); _names_in_use = new Dictionary(); _graph_key = $"grap-key-{ops.uid()}/"; + } + + public void __enter__() + { } public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) @@ -409,31 +411,6 @@ namespace Tensorflow return return_outputs; } - public unsafe Operation[] ReturnOperations(IntPtr results) - { - TF_Operation return_oper_handle = new TF_Operation(); - int num_return_opers = 0; - c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle); - Operation[] return_opers = new Operation[num_return_opers]; - for (int i = 0; i < num_return_opers; i++) - { - var handle = return_oper_handle.node + Marshal.SizeOf() * i; - return_opers[i] = new Operation(*(IntPtr*)handle); - } - - return return_opers; - } - - public Operation OperationByName(string operName) - { - return c_api.TF_GraphOperationByName(_handle, operName); - } - - public ITensorOrOperation[] get_operations() - { - return _nodes_by_name.Values.Select(x => x).ToArray(); - } - public string[] 
get_all_collection_keys() { return _collections.Keys.Where(x => !x.StartsWith("__")).ToArray(); @@ -468,7 +445,12 @@ namespace Tensorflow public void Dispose() { - // c_api.TF_DeleteGraph(_handle); + /*if (_handle != IntPtr.Zero) + c_api.TF_DeleteGraph(_handle); + + _handle = IntPtr.Zero; + + GC.SuppressFinalize(this);*/ } /// @@ -481,17 +463,46 @@ namespace Tensorflow public Tensor get_tensor_by_name(string name) { return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); - } - - public void __enter__() - { + } + + public TensorShape GetTensorShape(TF_Output output) + { + var status = new Status(); + var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status); + status.Check(); + + if (ndim == -1) + return new TensorShape(); + + var dims = new long[ndim]; + c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status); + status.Check(); + + return new TensorShape(dims.Select(x => (int)x).ToArray()); + } + + public override string ToString() + { + int len = 0; + return c_api.TF_GraphDebugString(_handle, out len); } public void __exit__() { - } + } + + private IEnumerable GetEnumerable() + => c_api_util.tf_operations(this); + IEnumerator IEnumerable.GetEnumerator() + => GetEnumerable().GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() + { + throw new NotImplementedException(); + } + public static implicit operator IntPtr(Graph graph) { return graph._handle; diff --git a/src/TensorFlowNET.Core/Graphs/c_api.graph.cs b/src/TensorFlowNET.Core/Graphs/c_api.graph.cs index 05cd5940..889949ef 100644 --- a/src/TensorFlowNET.Core/Graphs/c_api.graph.cs +++ b/src/TensorFlowNET.Core/Graphs/c_api.graph.cs @@ -43,6 +43,9 @@ namespace Tensorflow [DllImport(TensorFlowLibName)] public static extern void TF_DeleteImportGraphDefResults(IntPtr results); + [DllImport(TensorFlowLibName)] + public static extern string TF_GraphDebugString(IntPtr graph, out int len); + [DllImport(TensorFlowLibName)] public static extern void 
TF_GraphGetOpDef(IntPtr graph, string op_name, IntPtr output_op_def, IntPtr status); @@ -100,6 +103,7 @@ namespace Tensorflow /// TF_Status* [DllImport(TensorFlowLibName)] public static extern void TF_GraphImportGraphDef(IntPtr graph, IntPtr graph_def, IntPtr options, IntPtr status); + /// /// Iterate through the operations of a graph. /// diff --git a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs index d10b1874..52dc7bf4 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs @@ -207,7 +207,7 @@ namespace Tensorflow.Keras.Layers public Tensor _assign_moving_average(RefVariable variable, Tensor value, Tensor momentum) { - return Python.with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope => + return Python.tf_with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope => { // var cm = ops.colocate_with(variable); var decay = ops.convert_to_tensor(1.0f - momentum, name: "decay"); diff --git a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs index f380ce78..d96c1f14 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs @@ -125,7 +125,7 @@ namespace Tensorflow.Keras.Layers // Symbolic execution on symbolic tensors. We will attempt to build // the corresponding TF subgraph inside `backend.get_graph()` var graph = backend.get_graph().as_default(); - with(ops.name_scope(_name_scope()), delegate + tf_with(ops.name_scope(_name_scope()), delegate { // Build layer if applicable (if the `build` method has been // overridden). 
diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs index 57c71e92..961952a6 100644 --- a/src/TensorFlowNET.Core/Layers/Layer.cs +++ b/src/TensorFlowNET.Core/Layers/Layer.cs @@ -72,7 +72,7 @@ namespace Tensorflow.Layers } Tensor outputs = null; - with(scope_context_manager, scope2 => + tf_with(scope_context_manager, scope2 => { _current_scope = scope2; // Actually call layer @@ -136,12 +136,12 @@ namespace Tensorflow.Layers _set_scope(); var reuse = built || (_reuse != null && _reuse.Value); - return with(tf.variable_scope(_scope, + return tf_with(tf.variable_scope(_scope, reuse: reuse, auxiliary_name_scope: false), scope => { _current_scope = scope; - return with(ops.name_scope(_name_scope()), delegate + return tf_with(ops.name_scope(_name_scope()), delegate { var variable = base.add_weight(name, shape, @@ -183,7 +183,7 @@ namespace Tensorflow.Layers } else { - with(tf.variable_scope(scope, default_name: _base_name), captured_scope => + tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope => { // convert variable_scope to VariableScope _scope = captured_scope; diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs index c6b7d24d..136c9e3b 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs @@ -122,7 +122,7 @@ namespace Tensorflow.Operations _external_values[result.name] = result; } - with(ops.control_dependencies(null), ctrl => + tf_with(ops.control_dependencies(null), ctrl => { var results = control_flow_ops._SwitchRefOrTensor(result, _pred); result = results[_branch]; diff --git a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs index 19a1266b..69affeea 100644 --- a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs +++ 
b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs @@ -58,7 +58,7 @@ namespace Tensorflow private Tensor _call_log_prob (Tensor value, string name) { - return with(ops.name_scope(name, "moments", new { value }), scope => + return tf_with(ops.name_scope(name, "moments", new { value }), scope => { try { diff --git a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs index 2aa15063..f4f4b4bf 100644 --- a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs +++ b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs @@ -50,9 +50,9 @@ namespace Tensorflow parameters.Add("validate_args", validate_args); parameters.Add("allow_nan_stats", allow_nan_stats); - with(ops.name_scope(name, "", new { loc, scale }), scope => + tf_with(ops.name_scope(name, "", new { loc, scale }), scope => { - with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd => + tf_with(ops.control_dependencies(validate_args ? 
new Operation[] { scale.op} : new Operation[] { }), cd => { this._loc = array_ops.identity(loc, name); this._scale = array_ops.identity(scale, name); diff --git a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs index 9e530a12..f8ed0446 100644 --- a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs @@ -24,7 +24,7 @@ namespace Tensorflow public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null, string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { - return with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate + return tf_with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate { // Save the `reduction` argument for loss normalization when distributing // to multiple replicas. Used only for estimator + v1 optimizer flow. 
@@ -77,7 +77,7 @@ namespace Tensorflow public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false) { - return with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => + return tf_with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => { string scope = name_scope; weights = math_ops.cast(weights, dtype: dtypes.float32); @@ -104,7 +104,7 @@ namespace Tensorflow string loss_collection= ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { - return with(ops.name_scope(scope, + return tf_with(ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", (logits, labels, weights)), name_scope => diff --git a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs index 9d1e5726..b385f9c8 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs @@ -30,7 +30,7 @@ namespace Tensorflow.Operations string data_format = "NHWC", string name = null) { - return with(ops.name_scope(name, "MaxPool", value), scope => + return tf_with(ops.name_scope(name, "MaxPool", value), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 3200e13f..5b820b3a 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -29,7 +29,7 @@ namespace Tensorflow.Operations TF_DataType dtype = TF_DataType.DtInvalid, int? 
parallel_iterations = null, bool swap_memory = false, bool time_major = false) { - with(tf.variable_scope("rnn"), scope => + tf_with(tf.variable_scope("rnn"), scope => { VariableScope varscope = scope; var flat_input = nest.flatten(inputs_tensor); @@ -139,7 +139,7 @@ namespace Tensorflow.Operations var time = array_ops.constant(0, dtype: dtypes.int32, name: "time"); string base_name = null; - with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); + tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); Func _create_ta = (name, element_shape, dtype_) => { diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 033731b0..ab34a320 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -58,7 +58,7 @@ namespace Tensorflow var input_types = new List(); object values = null; - return with(ops.name_scope(name), scope => + return tf_with(ops.name_scope(name), scope => { var inferred_from = new Dictionary(); var base_types = new List(); diff --git a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs index 1b99dcc8..8de412c8 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs @@ -23,7 +23,10 @@ namespace Tensorflow /// public partial class Operation { - public static implicit operator Operation(IntPtr handle) => new Operation(handle); + // make sure the new op is in the same graph instance + public static implicit operator Operation(IntPtr handle) + => new Operation(handle); + public static implicit operator IntPtr(Operation op) => op._handle; public static implicit operator Tensor(Operation op) => op.output; diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs index 83e7567f..6d6403c9 100644 --- 
a/src/TensorFlowNET.Core/Operations/Operation.Input.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs @@ -26,7 +26,18 @@ namespace Tensorflow { public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); - public int InputListLength(string name) => c_api.TF_OperationInputListLength(_handle, name, status); + + public int InputListLength(string name) + { + int num = 0; + using(var status = new Status()) + { + num = c_api.TF_OperationInputListLength(_handle, name, status); + status.Check(true); + } + return num; + } + public int NumInputs => c_api.TF_OperationNumInputs(_handle); private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray(); diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs index cefb76cf..24348322 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs @@ -24,7 +24,18 @@ namespace Tensorflow { public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(new TF_Output(_handle, index)); - public int OutputListLength(string name) => c_api.TF_OperationOutputListLength(_handle, name, status); + + public int OutputListLength(string name) + { + int num = 0; + using (var status = new Status()) + { + num = c_api.TF_OperationOutputListLength(_handle, name, status); + status.Check(true); + } + + return num; + } private Tensor[] _outputs; public Tensor[] outputs => _outputs; @@ -35,6 +46,8 @@ namespace Tensorflow public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); + public TF_Output this[int index] => _tf_output(index); + public unsafe TF_Input[] OutputConsumers(int index, int max_consumers) { int size = Marshal.SizeOf(); 
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index b673380b..d7590b97 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -54,11 +54,10 @@ namespace Tensorflow public Operation op => this; public TF_DataType dtype => TF_DataType.DtInvalid; - private Status status = new Status(); public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); - public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle)); - public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle)); + public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); + public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); private NodeDef _node_def; public NodeDef node_def @@ -96,10 +95,14 @@ namespace Tensorflow _operDesc = c_api.TF_NewOperation(g, opType, oper_name); c_api.TF_SetAttrType(_operDesc, "dtype", TF_DataType.TF_INT32); - _handle = c_api.TF_FinishOperation(_operDesc, status); - - // Dict mapping op name to file and line information for op colocation - // context managers. + using (var status = new Status()) + { + _handle = c_api.TF_FinishOperation(_operDesc, status); + status.Check(true); + } + + // Dict mapping op name to file and line information for op colocation + // context managers. _control_flow_context = graph._get_control_flow_context(); } @@ -220,6 +223,7 @@ namespace Tensorflow { AttrValue x = null; + using (var status = new Status()) using (var buf = new Buffer()) { c_api.TF_OperationGetAttrValueProto(_handle, name, buf, status); @@ -274,12 +278,15 @@ namespace Tensorflow var output = tensor._as_tf_output(); // Reset cached inputs. 
- _inputs = null; + _inputs = null; // after the c_api call next time _inputs is accessed // the updated inputs are reloaded from the c_api - c_api.UpdateEdge(_graph, output, input, status); - //var updated_inputs = inputs; - status.Check(); + using (var status = new Status()) + { + c_api.UpdateEdge(_graph, output, input, status); + //var updated_inputs = inputs; + status.Check(); + } } private void _assert_same_graph(Tensor tensor) diff --git a/src/TensorFlowNET.Core/Operations/RNNCell.cs b/src/TensorFlowNET.Core/Operations/RNNCell.cs index 57f46e7b..1b260981 100644 --- a/src/TensorFlowNET.Core/Operations/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/RNNCell.cs @@ -82,7 +82,7 @@ namespace Tensorflow { Tensor output = null; var state_size = this.state_size; - with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate + tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate { output = _zero_state_tensors(state_size, batch_size, dtype); }); diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index b4619c05..bbeee929 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -66,7 +66,7 @@ namespace Tensorflow.Operations _element_shape = new List { }; } - with(ops.name_scope(name, "", new { handle, size, flow }), scope => + tf_with(ops.name_scope(name, "", new { handle, size, flow }), scope => { if(handle != null) { diff --git a/src/TensorFlowNET.Core/Operations/array_ops.py.cs b/src/TensorFlowNET.Core/Operations/array_ops.py.cs index c3f52cb8..2e909ab8 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.py.cs @@ -43,7 +43,7 @@ namespace Tensorflow public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - 
return with(ops.name_scope(name, "zeros", shape), scope => + return tf_with(ops.name_scope(name, "zeros", shape), scope => { name = scope; switch (dtype) @@ -67,7 +67,7 @@ namespace Tensorflow public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "zeros", shape), scope => + return tf_with(ops.name_scope(name, "zeros", shape), scope => { name = scope; switch (dtype) @@ -140,7 +140,7 @@ namespace Tensorflow { var must_pack = false; var converted_elems = new List(); - return with(ops.name_scope(name), scope => + return tf_with(ops.name_scope(name), scope => { foreach (var (i, elem) in enumerate(list_or_tuple)) { @@ -189,7 +189,7 @@ namespace Tensorflow public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) { - return with(ops.name_scope(name, "Rank", new List { input }), scope => + return tf_with(ops.name_scope(name, "Rank", new List { input }), scope => { name = scope; var input_tensor = ops.convert_to_tensor(input); @@ -217,7 +217,7 @@ namespace Tensorflow private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { - return with(ops.name_scope(name, "ones_like", new { tensor }), scope => + return tf_with(ops.name_scope(name, "ones_like", new { tensor }), scope => { name = scope; var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); @@ -233,7 +233,7 @@ namespace Tensorflow public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { shape }), scope => + return tf_with(ops.name_scope(name, "ones", new { shape }), scope => { name = scope; var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); @@ -244,7 +244,7 @@ namespace Tensorflow public static Tensor ones(Tensor[] shape, TF_DataType dtype = 
TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { shape }), scope => + return tf_with(ops.name_scope(name, "ones", new { shape }), scope => { name = scope; var output = _constant_if_small(1, shape[0]); @@ -257,7 +257,7 @@ namespace Tensorflow public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); - return with(ops.name_scope(name, "ones", new { dims }), scope => + return tf_with(ops.name_scope(name, "ones", new { dims }), scope => { name = scope; var shape = ops.convert_to_tensor(dims, dtype: TF_DataType.TF_INT32); @@ -273,7 +273,7 @@ namespace Tensorflow int axis = -1, string name = null) { - return with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => + return tf_with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => { name = scope; var on_exists = false; @@ -341,7 +341,7 @@ namespace Tensorflow private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) { - return with(ops.name_scope(name, "Shape", new { input }), scope => + return tf_with(ops.name_scope(name, "Shape", new { input }), scope => { name = scope; @@ -362,7 +362,7 @@ namespace Tensorflow private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) { - return with(ops.name_scope(name, "Size", new { input }), scope => + return tf_with(ops.name_scope(name, "Size", new { input }), scope => { name = scope; @@ -382,7 +382,7 @@ namespace Tensorflow public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) { - return with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => + return tf_with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => { name = 
scope; tensor = ops.convert_to_tensor(tensor, name: "tensor"); @@ -516,7 +516,7 @@ namespace Tensorflow { if(values.Length == 1) // Degenerate case of one tensor. { - return with(ops.name_scope(name), scope => { + return tf_with(ops.name_scope(name), scope => { var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32); return identity(values[0], name: scope); }); @@ -535,7 +535,7 @@ namespace Tensorflow public static Tensor transpose(T1 a, T2 perm, string name = "transpose", bool conjugate = false) { - return with(ops.name_scope(name, "transpose", new { a }), scope => + return tf_with(ops.name_scope(name, "transpose", new { a }), scope => { return gen_array_ops.transpose(a, perm, name: scope); }); diff --git a/src/TensorFlowNET.Core/Operations/check_ops.cs b/src/TensorFlowNET.Core/Operations/check_ops.cs index d5d76dd3..06b648b6 100644 --- a/src/TensorFlowNET.Core/Operations/check_ops.cs +++ b/src/TensorFlowNET.Core/Operations/check_ops.cs @@ -31,7 +31,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate + return tf_with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate { var x = ops.convert_to_tensor(t1, name: "x"); var y = ops.convert_to_tensor(t2, name: "y"); @@ -62,7 +62,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_positive", new { x, data }), delegate + return tf_with(ops.name_scope(name, "assert_positive", new { x, data }), delegate { x = ops.convert_to_tensor(x, name: "x"); if (data == null) @@ -86,7 +86,7 @@ namespace Tensorflow if (message == null) message = ""; - return with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate + return tf_with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate { x = ops.convert_to_tensor(x, name: "x"); y = ops.convert_to_tensor(y, name: "y"); diff --git 
a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs index be5ca217..680b500c 100644 --- a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs +++ b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs @@ -34,7 +34,7 @@ namespace Tensorflow int expected_rank_diff = 0, string name = null) { - return with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate + return tf_with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate { predictions = ops.convert_to_tensor(predictions); labels = ops.convert_to_tensor(labels); diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs index 39a4538b..2717fd3e 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.py.cs @@ -28,7 +28,7 @@ namespace Tensorflow { public static Operation Assert(Tensor condition, object[] data, int? 
summarize = null, string name = null) { - return with(ops.name_scope(name, "Assert", new { condition, data }), scope => + return tf_with(ops.name_scope(name, "Assert", new { condition, data }), scope => { name = scope; var xs = ops.convert_n_to_tensor(data); @@ -53,7 +53,7 @@ namespace Tensorflow public static Operation group(T[] inputs, string name = null) where T : ITensorOrOperation { - return with(ops.name_scope(name, "group_deps", inputs), scope => + return tf_with(ops.name_scope(name, "group_deps", inputs), scope => { name = scope; @@ -91,7 +91,7 @@ namespace Tensorflow private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null) { - return with(ops.control_dependencies(deps), ctl => + return tf_with(ops.control_dependencies(deps), ctl => { if (dev == null) { @@ -135,7 +135,7 @@ namespace Tensorflow public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null) { - return with(ops.name_scope(name, "tuple", tensors), scope => + return tf_with(ops.name_scope(name, "tuple", tensors), scope => { name = scope; var gating_ops = tensors.Where(x => x != null).Select(x => x.op).ToList(); @@ -189,13 +189,13 @@ namespace Tensorflow values.AddRange(dependencies); values.Add(output_tensor); - return with(ops.name_scope(name, "control_dependency", values), scope => + return tf_with(ops.name_scope(name, "control_dependency", values), scope => { name = scope; // TODO: missing original code //with ops.colocate_with(output_tensor): { - return with(ops.control_dependencies(dependencies), ctl => + return tf_with(ops.control_dependencies(dependencies), ctl => { output_tensor = ops.convert_to_tensor_or_composite(output_tensor); return _Identity(output_tensor, name: name); @@ -306,7 +306,7 @@ namespace Tensorflow bool strict = false, string name = null) { - return with(ops.name_scope(name, "cond", new { pred }), delegate + return tf_with(ops.name_scope(name, "cond", new { pred }), delegate { // TODO: here a 
chunk of original code is missing /* @@ -398,7 +398,7 @@ namespace Tensorflow bool strict = false, string name = null) { - return with(ops.name_scope(name, "cond", new { pred }), delegate + return tf_with(ops.name_scope(name, "cond", new { pred }), delegate { // Add the Switch to the graph. var switch_result = @switch(pred, pred); @@ -467,7 +467,7 @@ namespace Tensorflow { if (inputs.Any(x => x == null)) throw new ValueError($"At least one of the merge inputs is null: {inputs}"); - return with(ops.name_scope(name, "Merge", inputs), scope => + return tf_with(ops.name_scope(name, "Merge", inputs), scope => { name = scope; inputs = inputs.Select(inp => @@ -489,7 +489,7 @@ namespace Tensorflow TF_DataType dtype = TF_DataType.DtInvalid, string name = null) { - return with(ops.name_scope(name, "Switch", new { data, pred }), scope => + return tf_with(ops.name_scope(name, "Switch", new { data, pred }), scope => { name = scope; data = ops.internal_convert_to_tensor_or_indexed_slices(data, diff --git a/src/TensorFlowNET.Core/Operations/embedding_ops.cs b/src/TensorFlowNET.Core/Operations/embedding_ops.cs index e52107dc..23864329 100644 --- a/src/TensorFlowNET.Core/Operations/embedding_ops.cs +++ b/src/TensorFlowNET.Core/Operations/embedding_ops.cs @@ -35,7 +35,7 @@ namespace Tensorflow string name = null, string max_norm = null) { - return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => + return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => { name = scope; int np = 1; @@ -58,7 +58,7 @@ namespace Tensorflow string name = null, string max_norm = null) { - return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => + return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => { name = scope; int np = @params.Length; diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs index dee089ea..dc7188a8 
100644 --- a/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs @@ -28,7 +28,7 @@ namespace Tensorflow if (dtype == image.dtype) return array_ops.identity(image, name: name); - return with(ops.name_scope(name, "convert_image", image), scope => + return tf_with(ops.name_scope(name, "convert_image", image), scope => { name = scope; diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 8ec7e253..a8b9ac49 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -350,6 +350,13 @@ namespace Tensorflow return _op.outputs[0]; } + public static Tensor logical_and(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y }); + + return _op.outputs[0]; + } + public static Tensor squared_difference(Tensor x, Tensor y, string name = null) { var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index fc8f08ac..a5d26b23 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -29,7 +29,7 @@ namespace Tensorflow { public static Tensor abs(Tensor x, string name = null) { - return with(ops.name_scope(name, "Abs", new { x }), scope => + return tf_with(ops.name_scope(name, "Abs", new { x }), scope => { x = ops.convert_to_tensor(x, name: "x"); if (x.dtype.is_complex()) @@ -69,7 +69,7 @@ namespace Tensorflow if(base_type == x.dtype) return x; - return with(ops.name_scope(name, "Cast", new { x }), scope => + return tf_with(ops.name_scope(name, "Cast", new { x }), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -82,7 +82,7 @@ namespace Tensorflow public static Tensor cumsum(Tensor x, int axis = 0, bool exclusive = 
false, bool reverse = false, string name = null) { - return with(ops.name_scope(name, "Cumsum", new {x}), scope => + return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -110,7 +110,7 @@ namespace Tensorflow /// `x / y` returns the quotient of x and y. public static Tensor div(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "div", (x, y)), name_scope => + return tf_with(ops.name_scope(name, "div", (x, y)), name_scope => { name = name_scope; x = ops.convert_to_tensor(x, name: "x"); @@ -146,7 +146,7 @@ namespace Tensorflow /// public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => + return tf_with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => { name = name_scope; x = ops.convert_to_tensor(x, name: "x"); @@ -229,7 +229,7 @@ namespace Tensorflow public static Tensor sign(Tensor x, string name = null) { - return with(ops.name_scope(name, "Sign", new {x}), scope => + return tf_with(ops.name_scope(name, "Sign", new {x}), scope => { x = ops.convert_to_tensor(x, name: "x"); return gen_math_ops.sign(x); @@ -337,7 +337,7 @@ namespace Tensorflow /// The reduced tensor. 
public static Tensor reduce_logsumexp(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) { - return with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => + return tf_with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => { var raw_max = reduce_max(input_tensor, axis, true); var my_max = array_ops.stop_gradient(array_ops.where(gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max))); @@ -497,7 +497,7 @@ namespace Tensorflow if (delta == null) delta = 1; - return with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => + return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => { name = scope; var start1 = ops.convert_to_tensor(start, name: "start"); @@ -510,7 +510,7 @@ namespace Tensorflow public static Tensor floordiv(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "floordiv", new { x, y }), scope => + return tf_with(ops.name_scope(name, "floordiv", new { x, y }), scope => { return gen_math_ops.floor_div(x, y, scope); }); @@ -527,7 +527,7 @@ namespace Tensorflow { Tensor result = null; - with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => + tf_with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => { name = scope; @@ -551,7 +551,7 @@ namespace Tensorflow { Tensor result = null; - with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => + tf_with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => { name = scope; @@ -576,7 +576,7 @@ namespace Tensorflow if (dt.is_floating() || dt.is_integer()) return x; - return with(ops.name_scope(name, "Conj", new List { x }), scope => + return tf_with(ops.name_scope(name, "Conj", new List { x }), scope => { return x; @@ -591,7 +591,7 @@ namespace Tensorflow public static Tensor _truediv_python3(Tensor x, Tensor y, string name = null) { - return with(ops.name_scope(name, "truediv", new { x, y }), scope => + return 
tf_with(ops.name_scope(name, "truediv", new { x, y }), scope => { name = scope; var x_dtype = x.dtype.as_base_dtype(); diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs index 7555f1cd..84fb5486 100644 --- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs @@ -34,7 +34,7 @@ namespace Tensorflow float epsilon = 1e-12f, string name = null) { - return with(ops.name_scope(name, "l2_normalize", new { x }), scope => + return tf_with(ops.name_scope(name, "l2_normalize", new { x }), scope => { x = ops.convert_to_tensor(x, name: "x"); var sq = math_ops.square(x); @@ -57,7 +57,7 @@ namespace Tensorflow string name = null, bool keep_dims = false) { - return with(ops.name_scope(name, "moments", new { x, axes }), scope => + return tf_with(ops.name_scope(name, "moments", new { x, axes }), scope => { // The dynamic range of fp16 is too limited to support the collection of // sufficient statistics. As a workaround we simply perform the operations @@ -123,7 +123,7 @@ namespace Tensorflow /// number of nonzero values with type dtype private static Tensor _count_nonzero(Tensor input_tensor, TF_DataType dtype = TF_DataType.TF_INT64) { - return with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => + return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => { var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype); var nonzero_count = math_ops.reduce_sum( @@ -140,7 +140,7 @@ namespace Tensorflow /// The fraction of zeros in value, with type float32. 
public static Tensor zero_fraction(Tensor value, string name = null) { - return with(ops.name_scope(name, "zero_fraction", new { value }), scope => + return tf_with(ops.name_scope(name, "zero_fraction", new { value }), scope => { value = ops.convert_to_tensor(value, name: "value"); Tensor size = array_ops.size(value, out_type: dtypes.int64); @@ -153,7 +153,7 @@ namespace Tensorflow () => _count_nonzero(value, dtype: dtypes.int64) ); - with(ops.name_scope("counts_to_fraction"), count_scope => + tf_with(ops.name_scope("counts_to_fraction"), count_scope => { var num_zero = math_ops.subtract(math_ops.cast(size, TF_DataType.TF_INT64), num_nonzero); var num_zero_float32 = math_ops.cast(num_zero, dtype: dtypes.float32); diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index 0f7efce3..b3dda42f 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -50,7 +50,7 @@ namespace Tensorflow string data_format = null, string name = null) { - return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => + return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); @@ -70,7 +70,7 @@ namespace Tensorflow /// public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? 
seed = null, string name = null) { - return with(ops.name_scope(name, "dropout", x), scope => + return tf_with(ops.name_scope(name, "dropout", x), scope => { name = scope; x = ops.convert_to_tensor(x, name: "x"); @@ -134,7 +134,7 @@ namespace Tensorflow /// public static Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) { - return with(ops.name_scope(name, "MaxPool", value), scope => + return tf_with(ops.name_scope(name, "MaxPool", value), scope => { name = scope; value = ops.convert_to_tensor(value, name: "input"); @@ -171,7 +171,7 @@ namespace Tensorflow Tensor logits = null, string name = null) { // Reshape logits and labels to rank 2. - return with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate + return tf_with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate { labels = ops.convert_to_tensor(labels); logits = ops.convert_to_tensor(logits); @@ -206,7 +206,7 @@ namespace Tensorflow int axis = -1, string name = null) { - return with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => + return tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => { name = scope; var precise_logits = logits; diff --git a/src/TensorFlowNET.Core/Operations/random_ops.py.cs b/src/TensorFlowNET.Core/Operations/random_ops.py.cs index 37ede456..3232c917 100644 --- a/src/TensorFlowNET.Core/Operations/random_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/random_ops.py.cs @@ -37,7 +37,7 @@ namespace Tensorflow int? 
seed = null, string name = null) { - return with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => + return tf_with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => { var shape_tensor = _ShapeTensor(shape); var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean"); @@ -67,7 +67,7 @@ namespace Tensorflow int? seed = null, string name = null) { - return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => + return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => { name = scope; var tensorShape = _ShapeTensor(shape); @@ -85,7 +85,7 @@ namespace Tensorflow int? seed = null, string name = null) { - return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => + return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => { name = scope; var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min"); @@ -110,7 +110,7 @@ namespace Tensorflow int? 
seed = null, string name = null) { - return with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => + return tf_with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => { name = scope; var shape_tensor = _ShapeTensor(shape); diff --git a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs index 7b889bf8..d8bfcbac 100644 --- a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs +++ b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs @@ -22,7 +22,7 @@ namespace Tensorflow { public static Tensor broadcast_weights(Tensor weights, Tensor values) { - return with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => + return tf_with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => { values = ops.convert_to_tensor(values, name: "values"); weights = ops.convert_to_tensor( diff --git a/src/TensorFlowNET.Core/Python.cs b/src/TensorFlowNET.Core/Python.cs index 1d205fde..6565037b 100644 --- a/src/TensorFlowNET.Core/Python.cs +++ b/src/TensorFlowNET.Core/Python.cs @@ -75,7 +75,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static void with(IPython py, Action action) + public static void tf_with(IPython py, Action action) { try { @@ -95,7 +95,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static void with(T py, Action action) where T : IPython + public static void tf_with(T py, Action action) where T : IPython { try { @@ -115,7 +115,7 @@ namespace Tensorflow } [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception - public static TOut with(TIn py, Func action) where TIn : IPython + public static TOut tf_with(TIn py, Func action) where TIn 
: IPython { try { diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index deb82b51..fa3947f5 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -24,7 +24,7 @@ using System.Text; namespace Tensorflow { - public class BaseSession + public class BaseSession : DisposableObject { protected Graph _graph; protected bool _opened; @@ -32,28 +32,23 @@ namespace Tensorflow protected int _current_version; protected byte[] _target; protected IntPtr _session; - public Status Status; public Graph graph => _graph; public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) { _graph = g is null ? ops.get_default_graph() : g; - + _graph.as_default(); _target = UTF8Encoding.UTF8.GetBytes(target); SessionOptions newOpts = null; if (opts == null) - newOpts = c_api.TF_NewSessionOptions(); - - Status = new Status(); + newOpts = new SessionOptions(); - _session = c_api.TF_NewSession(_graph, opts ?? newOpts, Status); + var status = new Status(); - // dispose newOpts - if (opts == null) - c_api.TF_DeleteSessionOptions(newOpts); + _session = c_api.TF_NewSession(_graph, opts ?? 
newOpts, status); - Status.Check(true); + status.Check(true); } public virtual NDArray run(object fetches, params FeedItem[] feed_dict) @@ -325,5 +320,19 @@ namespace Tensorflow { } + + public void close() + { + Dispose(); + } + + protected override void DisposeUnManagedState(IntPtr handle) + { + using (var status = new Status()) + { + c_api.TF_DeleteSession(handle, status); + status.Check(true); + } + } } } diff --git a/src/TensorFlowNET.Core/Sessions/Session.cs b/src/TensorFlowNET.Core/Sessions/Session.cs index 21c4de09..36797ec7 100644 --- a/src/TensorFlowNET.Core/Sessions/Session.cs +++ b/src/TensorFlowNET.Core/Sessions/Session.cs @@ -15,6 +15,7 @@ ******************************************************************************/ using System; +using System.Runtime.InteropServices; namespace Tensorflow { @@ -26,8 +27,8 @@ namespace Tensorflow } - public Session(IntPtr handle) - : base("", null, null) + public Session(IntPtr handle, Graph g = null) + : base("", g, null) { _session = handle; } @@ -36,7 +37,7 @@ namespace Tensorflow : base("", g, opts) { if (s == null) - s = Status; + s = new Status(); } public Session as_default() @@ -49,35 +50,32 @@ namespace Tensorflow { var graph = c_api.TF_NewGraph(); var status = new Status(); - var opt = c_api.TF_NewSessionOptions(); + var opt = new SessionOptions(); + var tags = new string[] { "serve" }; var buffer = new TF_Buffer(); - var sess = c_api.TF_LoadSessionFromSavedModel(opt, IntPtr.Zero, path, new string[0], 0, graph, ref buffer, status); - //var bytes = new Buffer(buffer.data).Data; - //var meta_graph = MetaGraphDef.Parser.ParseFrom(bytes); - - status.Check(); - - new Graph(graph).as_default(); - - return sess; + var sess = c_api.TF_LoadSessionFromSavedModel(opt, + IntPtr.Zero, + path, + tags, + tags.Length, + graph, + ref buffer, + status); + + // load graph bytes + // var data = new byte[buffer.length]; + // Marshal.Copy(buffer.data, data, 0, (int)buffer.length); + // var meta_graph = 
MetaGraphDef.Parser.ParseFrom(data);*/ + status.Check(true); + + return new Session(sess, g: new Graph(graph).as_default()); } public static implicit operator IntPtr(Session session) => session._session; public static implicit operator Session(IntPtr handle) => new Session(handle); - public void close() - { - Dispose(); - } - - public void Dispose() - { - c_api.TF_DeleteSession(_session, Status); - Status.Dispose(); - } - public void __enter__() { diff --git a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs index 361a48d6..8e0a0a74 100644 --- a/src/TensorFlowNET.Core/Sessions/SessionOptions.cs +++ b/src/TensorFlowNET.Core/Sessions/SessionOptions.cs @@ -20,37 +20,34 @@ using System.Runtime.InteropServices; namespace Tensorflow { - public class SessionOptions : IDisposable + public class SessionOptions : DisposableObject { - private IntPtr _handle; - private Status _status; - - public unsafe SessionOptions() + public SessionOptions() { - var opts = c_api.TF_NewSessionOptions(); - _handle = opts; - _status = new Status(); + _handle = c_api.TF_NewSessionOptions(); } - public unsafe SessionOptions(IntPtr handle) + public SessionOptions(IntPtr handle) { _handle = handle; } - public void Dispose() - { - c_api.TF_DeleteSessionOptions(_handle); - _status.Dispose(); - } + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteSessionOptions(handle); - public Status SetConfig(ConfigProto config) + public void SetConfig(ConfigProto config) { var bytes = config.ToByteArray(); var proto = Marshal.AllocHGlobal(bytes.Length); Marshal.Copy(bytes, 0, proto, bytes.Length); - c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, _status); - _status.Check(false); - return _status; + + using (var status = new Status()) + { + c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, status); + status.Check(false); + } + + Marshal.FreeHGlobal(proto); } public static implicit operator IntPtr(SessionOptions 
opts) => opts._handle; diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs index d39a73c7..7eb2d7e3 100644 --- a/src/TensorFlowNET.Core/Status/Status.cs +++ b/src/TensorFlowNET.Core/Status/Status.cs @@ -22,10 +22,8 @@ namespace Tensorflow /// TF_Status holds error information. It either has an OK code, or /// else an error code with an associated error message. /// - public class Status : IDisposable + public class Status : DisposableObject { - protected IntPtr _handle; - /// /// Error message /// @@ -67,22 +65,7 @@ namespace Tensorflow return status._handle; } - public void Dispose() - { - IntPtr h = IntPtr.Zero; - lock (this) - { - h = _handle; - _handle = IntPtr.Zero; - } - if (h != IntPtr.Zero) - c_api.TF_DeleteStatus(h); - GC.SuppressFinalize(this); - } - - ~Status() - { - Dispose(); - } + protected override void DisposeUnManagedState(IntPtr handle) + => c_api.TF_DeleteStatus(handle); } } diff --git a/src/TensorFlowNET.Core/Summaries/Summary.cs b/src/TensorFlowNET.Core/Summaries/Summary.cs index 5a22385f..258edf88 100644 --- a/src/TensorFlowNET.Core/Summaries/Summary.cs +++ b/src/TensorFlowNET.Core/Summaries/Summary.cs @@ -55,7 +55,7 @@ namespace Tensorflow.Summaries /// public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null) { - return with(ops.name_scope(name, "Merge", inputs), delegate + return tf_with(ops.name_scope(name, "Merge", inputs), delegate { var val = gen_logging_ops.merge_summary(inputs: inputs, name: name); collect(val, collections?.ToList(), new List()); @@ -88,7 +88,7 @@ namespace Tensorflow.Summaries public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null) { string scope_base_name = string.IsNullOrEmpty(family) ? 
name : $"{family}/{name}"; - return with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => + return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => { var tag = scope._name_scope; if (string.IsNullOrEmpty(family)) diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj index 1ec4f6f3..3037221c 100644 --- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj +++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj @@ -5,7 +5,7 @@ TensorFlow.NET Tensorflow 1.14.0 - 0.10.3 + 0.10.8 Haiping Chen, Meinrad Recheis SciSharp STACK true @@ -17,7 +17,7 @@ TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C# Google's TensorFlow full binding in .NET Standard. Docs: https://tensorflownet.readthedocs.io - 0.10.3.0 + 0.10.8.0 Changes since v0.9.0: 1. Added full connected Convolution Neural Network example. @@ -29,9 +29,15 @@ Docs: https://tensorflownet.readthedocs.io 7. Add BatchMatMulGrad. 8. Upgrade NumSharp. 9. Fix strided_slice_grad type convention error. -10. Add AbsGrad. +10. Add AbsGrad. +11. Fix Session.LoadFromSavedModel(string). +12. Add Tensor operator overloads. +13. Fix default graph and operation issue when import model. +14. Fix TF_String endcode and decode. +15. Fix Tensor memory leak. +16. Rename with to tf_with that is only used to build graph purpose. 7.2 - 0.10.3.0 + 0.10.8.0 LICENSE true true @@ -62,7 +68,7 @@ Docs: https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs index f5ac5f77..a104f066 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs @@ -1,648 +1,717 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using NumSharp; -using System; -using System.Linq; -using System.Numerics; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; -using System.Text; -using static Tensorflow.c_api; - -namespace Tensorflow -{ - public partial class Tensor - { - /// - /// true if unmanaged buffer has been freed. - /// - private bool _deallocator_called => _deallocatorArgs.deallocator_called; - - /// - /// true if the Tensor was created from a managed array - /// - private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero; - - /// - /// True only if the Tensor object was created in a way that the Tensor object itself allocated memory or pinned a managed object. 
- /// False if the Tensor was created from a pointer - /// - public bool IsMemoryOwner { get; private set; } - - /// - /// This holds values that are used by the unmanaged deallocator callback - /// - private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero }; - - // note: they must be assigned to a static variable in order to work as unmanaged callbacks - static Deallocator _hGlobalDeallocator = FreeHGlobalMemory; - static Deallocator _gcHandleDeallocator = FreeGCHandle; - private static Deallocator _nothingDeallocator = FreeNothing; - - /// - /// Create a Tensor object from an existing TF handle - /// - /// - public Tensor(IntPtr handle) - { - _handle = handle; - IsMemoryOwner = false; - } - - /// - /// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller) - /// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor - /// but not the memory itself! - /// - /// Pointer to unmanaged, fixed or pinned memory which the caller owns - /// Tensor shape - /// TF data type - /// Size of the tensor in memory - public Tensor(IntPtr ptr, long[] shape, TF_DataType dType, int num_bytes) - { - _handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs); - IsMemoryOwner = false; - } - -#if _REGEN - %types=["sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"] - %foreach types% - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(#1[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? 
dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(#1[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(#1 value, TF_DataType? dType = null) - { - var v = (#1*)Marshal.AllocHGlobal(sizeof(#1)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(#1), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - % -#else - - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(sbyte[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(sbyte[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(sbyte value, TF_DataType? dType = null) - { - var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(byte[] data, TF_DataType? 
dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(byte[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(byte value, TF_DataType? dType = null) - { - var v = (byte*)Marshal.AllocHGlobal(sizeof(byte)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(short[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(short[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(short value, TF_DataType? dType = null) - { - var v = (short*)Marshal.AllocHGlobal(sizeof(short)); - *v = value; - _handle = TF_NewTensor(dType ?? 
dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(ushort[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(ushort[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(ushort value, TF_DataType? dType = null) - { - var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(int[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(int[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(int value, TF_DataType? 
dType = null) - { - var v = (int*)Marshal.AllocHGlobal(sizeof(int)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(uint[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(uint[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(uint value, TF_DataType? dType = null) - { - var v = (uint*)Marshal.AllocHGlobal(sizeof(uint)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(long[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(long[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(long value, TF_DataType? 
dType = null) - { - var v = (long*)Marshal.AllocHGlobal(sizeof(long)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(ulong[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(ulong[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(ulong value, TF_DataType? dType = null) - { - var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(float[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(float[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? 
dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(float value, TF_DataType? dType = null) - { - var v = (float*)Marshal.AllocHGlobal(sizeof(float)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(double[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(double[] data, long[] shape, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(double value, TF_DataType? dType = null) - { - var v = (double*)Marshal.AllocHGlobal(sizeof(double)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } - - /// - /// Create a 1d Tensor from the given linear array and shape - /// - public Tensor(Complex[] data, TF_DataType? dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a N-dimensional Tensor from the given array - /// - public Tensor(Complex[] data, long[] shape, TF_DataType? 
dType = null) - { - _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf()); - IsMemoryOwner=true; - } - - /// - /// Create a scalar Tensor from the given value - /// - public unsafe Tensor(Complex value, TF_DataType? dType = null) - { - var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex)); - *v = value; - _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); - IsMemoryOwner=true; - } -#endif - - /// - /// Create a string Tensor from the given string - /// - public unsafe Tensor(string str) - { - var buffer = Encoding.UTF8.GetBytes(str); - var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); - var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); - - IntPtr tensor = c_api.TF_TensorData(handle); - Marshal.WriteInt64(tensor, 0); - fixed (byte* src = &buffer[0]) - c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); - _handle = handle; - status.Check(true); - } - - public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null) - { - if (tensorDType == TF_DataType.TF_STRING && nd.dtype.Name == "Byte") - { - var buffer = nd.Data(); - var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); - var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); - - IntPtr tensor = c_api.TF_TensorData(handle); - Marshal.WriteInt64(tensor, 0); - fixed (byte* src = &buffer[0]) - c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); - - status.Check(true); - _handle=handle; - IsMemoryOwner = false; - return; - } - _handle = Allocate(nd, tensorDType: tensorDType); - IsMemoryOwner = true; - } - - private unsafe IntPtr Allocate(NDArray nd, TF_DataType? 
tensorDType = null) - { - IntPtr dotHandle = IntPtr.Zero; - int buffersize = 0; - - if (nd.dtype.Name != "String") +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using NumSharp; +using System; +using System.Linq; +using System.Numerics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +using static Tensorflow.c_api; + +namespace Tensorflow +{ + public partial class Tensor + { + /// + /// true if unmanaged buffer has been freed. + /// + private bool _deallocator_called => _deallocatorArgs.deallocator_called; + + /// + /// true if the Tensor was created from a managed array + /// + private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero; + + /// + /// True only if the Tensor object was created in a way that the Tensor object itself allocated memory or pinned a managed object. 
+ /// False if the Tensor was created from a pointer + /// + public bool IsMemoryOwner { get; private set; } + + /// + /// This holds values that are used by the unmanaged deallocator callback + /// + private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero }; + + // note: they must be assigned to a static variable in order to work as unmanaged callbacks + static Deallocator _hGlobalDeallocator = FreeHGlobalMemory; + static Deallocator _gcHandleDeallocator = FreeGCHandle; + private static Deallocator _nothingDeallocator = FreeNothing; + + /// + /// Create a Tensor object from an existing TF handle + /// + /// + public Tensor(IntPtr handle) + { + _handle = handle; + IsMemoryOwner = false; + } + + /// + /// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller) + /// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor + /// but not the memory itself! + /// + /// Pointer to unmanaged, fixed or pinned memory which the caller owns + /// Tensor shape + /// TF data type + /// Size of the tensor in memory + public Tensor(IntPtr ptr, long[] shape, TF_DataType dType, int num_bytes) + { + _handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs); + IsMemoryOwner = false; + } + +#if _REGEN + %types=["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"] + %foreach types% + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(#1[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? 
dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(#1[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(#1 value, TF_DataType? dType = null) + { + var v = (#1*)Marshal.AllocHGlobal(sizeof(#1)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(#1), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + % +#else + + + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(sbyte[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(sbyte[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(sbyte value, TF_DataType? dType = null) + { + var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(bool[] data, TF_DataType? 
dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(bool[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(bool value, TF_DataType? dType = null) + { + var v = (bool*)Marshal.AllocHGlobal(sizeof(bool)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(bool)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(bool), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(byte[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(byte[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(byte value, TF_DataType? dType = null) + { + var v = (byte*)Marshal.AllocHGlobal(sizeof(byte)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(short[] data, TF_DataType? 
dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(short[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(short value, TF_DataType? dType = null) + { + var v = (short*)Marshal.AllocHGlobal(sizeof(short)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(ushort[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(ushort[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(ushort value, TF_DataType? dType = null) + { + var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort)); + *v = value; + _handle = TF_NewTensor(dType ?? 
dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(int[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(int[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(int value, TF_DataType? dType = null) + { + var v = (int*)Marshal.AllocHGlobal(sizeof(int)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(uint[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(uint[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(uint value, TF_DataType? dType = null) + { + var v = (uint*)Marshal.AllocHGlobal(sizeof(uint)); + *v = value; + _handle = TF_NewTensor(dType ?? 
dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(long[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(long[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(long value, TF_DataType? dType = null) + { + var v = (long*)Marshal.AllocHGlobal(sizeof(long)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(ulong[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(ulong[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(ulong value, TF_DataType? dType = null) + { + var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong)); + *v = value; + _handle = TF_NewTensor(dType ?? 
dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(float[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(float[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(float value, TF_DataType? dType = null) + { + var v = (float*)Marshal.AllocHGlobal(sizeof(float)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(double[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(double[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(double value, TF_DataType? 
dType = null) + { + var v = (double*)Marshal.AllocHGlobal(sizeof(double)); + *v = value; + _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } + + /// + /// Create a 1d Tensor from the given linear array and shape + /// + public Tensor(Complex[] data, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a N-dimensional Tensor from the given array + /// + public Tensor(Complex[] data, long[] shape, TF_DataType? dType = null) + { + _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf()); + IsMemoryOwner=true; + } + + /// + /// Create a scalar Tensor from the given value + /// + public unsafe Tensor(Complex value, TF_DataType? dType = null) + { + var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex)); + *v = value; + _handle = TF_NewTensor(dType ?? 
dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs); + IsMemoryOwner=true; + } +#endif + + /// + /// Create a string Tensor from the given string + /// + public unsafe Tensor(string str) + { + var status = new Status(); + var buffer = Encoding.UTF8.GetBytes(str); + var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); + var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); + + IntPtr tensor = c_api.TF_TensorData(handle); + Marshal.WriteInt64(tensor, 0); + fixed (byte* src = &buffer[0]) + c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); + _handle = handle; + status.Check(true); + } + + public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null) + { + if (tensorDType == TF_DataType.TF_STRING && nd.dtype.Name == "Byte") + { + var buffer = nd.Data(); + var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); + var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); + + IntPtr tensor = c_api.TF_TensorData(handle); + Marshal.WriteInt64(tensor, 0); + + var status = new Status(); + fixed (byte* src = &buffer[0]) + c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); + + status.Check(true); + _handle=handle; + IsMemoryOwner = false; + return; + } + _handle = Allocate(nd, tensorDType: tensorDType); + IsMemoryOwner = true; + } + + private unsafe IntPtr Allocate(NDArray nd, TF_DataType? 
tensorDType = null) + { + IntPtr dotHandle = IntPtr.Zero; + int buffersize = 0; + + if (nd.dtype.Name != "String") + { + buffersize = (nd.size * nd.dtypesize); + dotHandle = Marshal.AllocHGlobal(buffersize); + } + + var dataType = ToTFDataType(nd.dtype); + // shape + var dims = nd.shape.Select(x => (long)x).ToArray(); + var nd1 = nd.ravel(); + switch (nd.dtype.Name) + { + case "Boolean": + var boolVals = Array.ConvertAll(nd1.Data(), x => Convert.ToByte(x)); + Marshal.Copy(boolVals, 0, dotHandle, nd.size); + break; + case "Int16": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "Int32": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "Int64": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "Single": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "Double": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "Byte": + Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); + break; + case "String": + return new Tensor(UTF8Encoding.UTF8.GetBytes(nd.Data(0)), TF_DataType.TF_STRING); + default: + throw new NotImplementedException($"Marshal.Copy failed for {nd.dtype.Name}."); + } + var tfHandle = c_api.TF_NewTensor(dataType, + dims, + dims.Length, + dotHandle, + (UIntPtr)buffersize, + _hGlobalDeallocator, + ref _deallocatorArgs); + + return tfHandle; + } + + public unsafe Tensor(byte[][] buffer, long[] shape) + { + int size = 0; + foreach (var b in buffer) { - buffersize = (nd.size * nd.dtypesize); - dotHandle = Marshal.AllocHGlobal(buffersize); + size += (int)TF_StringEncodedSize((UIntPtr)b.Length); } - - var dataType = ToTFDataType(nd.dtype); - // shape - var dims = nd.shape.Select(x => (long)x).ToArray(); - var nd1 = nd.ravel(); - switch (nd.dtype.Name) + int totalSize = size + buffer.Length * 8; + ulong offset = 0; + IntPtr handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, (UIntPtr)totalSize); + + // Clear offset table + IntPtr pOffset = TF_TensorData(handle); + 
IntPtr dst = pOffset + buffer.Length * 8; + IntPtr dstLimit = pOffset + totalSize; + for (int i = 0; i < buffer.Length; i++) { - case "Boolean": - var boolVals = Array.ConvertAll(nd1.Data(), x => Convert.ToByte(x)); - Marshal.Copy(boolVals, 0, dotHandle, nd.size); - break; - case "Int16": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "Int32": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "Int64": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "Single": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "Double": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "Byte": - Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size); - break; - case "String": - return new Tensor(UTF8Encoding.UTF8.GetBytes(nd.Data(0)), TF_DataType.TF_STRING); - default: - throw new NotImplementedException($"Marshal.Copy failed for {nd.dtype.Name}."); + Marshal.WriteInt64(pOffset, (long)offset); + using (var status = new Status()) + { + fixed (byte* src = &buffer[i][0]) + { + var written = TF_StringEncode(src, (UIntPtr)buffer[i].Length, (sbyte*)dst, (UIntPtr)(dstLimit.ToInt64() - dst.ToInt64()), status); + status.Check(true); + pOffset += 8; + dst += (int)written; + offset += written; + } + } } - var tfHandle = c_api.TF_NewTensor(dataType, - dims, - dims.Length, - dotHandle, - (UIntPtr)buffersize, - _hGlobalDeallocator, - ref _deallocatorArgs); - - return tfHandle; - } - - public Tensor(Operation op, int value_index, TF_DataType dtype) - { - _op = op; - _value_index = value_index; - _dtype = dtype; - _id = ops.uid(); - } - - - /// - /// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on. - /// - /// Represents the tensor shape. - /// The linear array of data, the data must fit in the tensor with the specified dimensions. 
- /// The number of bytes in memory of a single array element - /// - /// Use the FromBuffer method to create a tensor that has the specified dimensions - /// and is initialized with data from the data array. The data is copied starting - /// at the start offset, for count bytes and is laid out into the tensor following the - /// specified dimensions. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size) - { - if (dt == TF_DataType.TF_STRING && data is byte[]) - { - var buffer = (byte[])data; - var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); - var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); - IntPtr tensor = c_api.TF_TensorData(handle); - Marshal.WriteInt64(tensor, 0); - fixed (byte* src = &buffer[0]) - c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); - - status.Check(true); - return handle; - } - return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size); - } - - /// - /// Creates a new tensor from a subsection of the given array without copying memory. The array is pinned down and the pointer passed on. - /// - /// Represents the tensor shape. - /// The linear array of data, the data must fit in the tensor with the specified dimensions. - /// The offset into the provided data array where the data resides. - /// The number of elements to copy from data. - /// The number of bytes in memory of a single array element - /// - /// Use the FromBuffer method to create a tensor that has the specified dimensions - /// and is initialized with data from the data array. The data is copied starting - /// at the start offset, for count bytes and is laid out into the tensor following the - /// specified dimensions. 
- /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size) - { - if (start < 0 || start > data.Length - count) - throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast().ToArray())}"); - - // get a handle to the pinned array which we will pass on to the tensor computation engine to use - var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned); - _deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) }; - if (shape == null || shape.Length == 0) - return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs); - else - return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs); - } - - [MonoPInvokeCallback(typeof(Deallocator))] - internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) - { - if (args.deallocator_called) - return; - Marshal.FreeHGlobal(dataPtr); - args.deallocator_called = true; - } - - [MonoPInvokeCallback(typeof(Deallocator))] - internal static void FreeGCHandle(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) - { - if (args.deallocator_called || args.gc_handle == IntPtr.Zero) - return; - // note: since the ptr given to tensorflow is just the addr of the pinned object we can not directly free it! 
we need to free the gcHandle instead - GCHandle.FromIntPtr(args.gc_handle).Free(); - args.deallocator_called = true; - } - - [MonoPInvokeCallback(typeof(Deallocator))] - internal static void FreeNothing(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) - { - args.deallocator_called = true; - } - - } - - /// - /// This attribute can be applied to callback functions that will be invoked - /// from unmanaged code to managed code. - /// - /// - /// - /// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))] - /// internal static void MyFreeFunc (IntPtr data, IntPtr length){..} - /// - /// - public sealed class MonoPInvokeCallbackAttribute : Attribute - { - /// - /// Use this constructor to annotate the type of the callback function that - /// will be invoked from unmanaged code. - /// - /// T. - public MonoPInvokeCallbackAttribute(Type t) { } - } - -} + _handle = handle; + } + + public Tensor(Operation op, int value_index, TF_DataType dtype) + { + _op = op; + _value_index = value_index; + _dtype = dtype; + _id = ops.uid(); + } + + + /// + /// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on. + /// + /// Represents the tensor shape. + /// The linear array of data, the data must fit in the tensor with the specified dimensions. + /// The number of bytes in memory of a single array element + /// + /// Use the FromBuffer method to create a tensor that has the specified dimensions + /// and is initialized with data from the data array. The data is copied starting + /// at the start offset, for count bytes and is laid out into the tensor following the + /// specified dimensions. 
+ /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size) + { + if (dt == TF_DataType.TF_STRING && data is byte[]) + { + var buffer = (byte[])data; + var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length); + var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8)); + + IntPtr tensor = c_api.TF_TensorData(handle); + Marshal.WriteInt64(tensor, 0); + + var status = new Status(); + fixed (byte* src = &buffer[0]) + c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status); + + status.Check(true); + return handle; + } + return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size); + } + + /// + /// Creates a new tensor from a subsection of the given array without copying memory. The array is pinned down and the pointer passed on. + /// + /// Represents the tensor shape. + /// The linear array of data, the data must fit in the tensor with the specified dimensions. + /// The offset into the provided data array where the data resides. + /// The number of elements to copy from data. + /// The number of bytes in memory of a single array element + /// + /// Use the FromBuffer method to create a tensor that has the specified dimensions + /// and is initialized with data from the data array. The data is copied starting + /// at the start offset, for count bytes and is laid out into the tensor following the + /// specified dimensions. 
+ /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size) + { + if (start < 0 || start > data.Length - count) + throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast().ToArray())}"); + + // get a handle to the pinned array which we will pass on to the tensor computation engine to use + var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned); + _deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) }; + if (shape == null || shape.Length == 0) + return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs); + else + return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs); + } + + [MonoPInvokeCallback(typeof(Deallocator))] + internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) + { + if (args.deallocator_called) + return; + Marshal.FreeHGlobal(dataPtr); + args.deallocator_called = true; + } + + [MonoPInvokeCallback(typeof(Deallocator))] + internal static void FreeGCHandle(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) + { + if (args.deallocator_called || args.gc_handle == IntPtr.Zero) + return; + // note: since the ptr given to tensorflow is just the addr of the pinned object we can not directly free it! 
we need to free the gcHandle instead + GCHandle.FromIntPtr(args.gc_handle).Free(); + args.deallocator_called = true; + } + + [MonoPInvokeCallback(typeof(Deallocator))] + internal static void FreeNothing(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args) + { + args.deallocator_called = true; + } + + } + + /// + /// This attribute can be applied to callback functions that will be invoked + /// from unmanaged code to managed code. + /// + /// + /// + /// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))] + /// internal static void MyFreeFunc (IntPtr data, IntPtr length){..} + /// + /// + public sealed class MonoPInvokeCallbackAttribute : Attribute + { + /// + /// Use this constructor to annotate the type of the callback function that + /// will be invoked from unmanaged code. + /// + /// T. + public MonoPInvokeCallbackAttribute(Type t) { } + } + +} diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs index 0b9c7f3e..4bd32d74 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs @@ -15,6 +15,7 @@ ******************************************************************************/ using System; +using System.Linq; using static Tensorflow.Python; namespace Tensorflow @@ -63,22 +64,56 @@ namespace Tensorflow public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); - public static Tensor operator /(Tensor x, Tensor y) => BinaryOpWrapper("truediv", x, y); - public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); + private static readonly TF_DataType[] _intTfDataTypes = { + TF_DataType.TF_INT8, TF_DataType.TF_INT16, TF_DataType.TF_INT32, TF_DataType.TF_INT64, + TF_DataType.TF_QINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QINT32, + TF_DataType.TF_UINT8, 
TF_DataType.TF_UINT16, TF_DataType.TF_UINT32, TF_DataType.TF_UINT64 + }; + public static Tensor operator /(double x, Tensor y) => BinaryOpWrapper("truediv", x, y); public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); + public static Tensor operator /(int x, Tensor y) => BinaryOpWrapper("floordiv", x, y); + public static Tensor operator /(Tensor x, Tensor y) => + _intTfDataTypes.Contains(x._dtype) + ? BinaryOpWrapper("floordiv", x, y) + : BinaryOpWrapper("truediv", x, y); + public static Tensor operator /(Tensor x, int y) => BinaryOpWrapper("floordiv", x, y); + public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); + public static Tensor operator >(double x, Tensor y) => gen_math_ops.greater(x, y); + public static Tensor operator >(float x, Tensor y) => gen_math_ops.greater(x, y); + public static Tensor operator >(int x, Tensor y) => gen_math_ops.greater(x, y); + public static Tensor operator >(Tensor x, Tensor y) => gen_math_ops.greater(x, y); public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); - public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); + + public static Tensor operator <(double x, Tensor y) => gen_math_ops.less(x, y); + public static Tensor operator <(float x, Tensor y) => gen_math_ops.less(x, y); + public static Tensor operator <(int x, Tensor y) => gen_math_ops.less(x, y); + public static Tensor operator <(Tensor x, Tensor y) => gen_math_ops.less(x, y); public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); - public static Tensor operator <=(Tensor x, Tensor y) => 
gen_math_ops.less_equal(x, y); public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y); + public static Tensor operator >=(double x, Tensor y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(float x, Tensor y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(int x, Tensor y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(Tensor x, int y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(Tensor x, float y) => gen_math_ops.greater_equal(x, y); + public static Tensor operator >=(Tensor x, double y) => gen_math_ops.greater_equal(x, y); + + public static Tensor operator <=(int x, Tensor y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(float x, Tensor y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(double x, Tensor y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(Tensor x, int y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(Tensor x, float y) => gen_math_ops.less_equal(x, y); + public static Tensor operator <=(Tensor x, double y) => gen_math_ops.less_equal(x, y); + private static Tensor BinaryOpWrapper(string name, Tx x, Ty y) { TF_DataType dtype = TF_DataType.DtInvalid; @@ -88,7 +123,7 @@ namespace Tensorflow dtype = tr.dtype.as_base_dtype(); var namescope = ops.name_scope(null, name, new { x, y }); - return with(namescope, scope => + return tf_with(namescope, scope => { Tensor result = null; var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x"); @@ -99,6 +134,9 @@ namespace Tensorflow case "add": result = gen_math_ops.add(x1, y1, name: scope); break; + case "floordiv": + result = 
gen_math_ops.floor_div(x1, y1, name: scope); + break; case "truediv": result = gen_math_ops.real_div(x1, y1, name: scope); break; diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index aebca212..50141be6 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -19,6 +19,7 @@ using System; using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; +using System.Text; using Tensorflow.Framework; using static Tensorflow.Python; @@ -28,10 +29,8 @@ namespace Tensorflow /// A tensor is a generalization of vectors and matrices to potentially higher dimensions. /// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes. /// - public partial class Tensor : IDisposable, ITensorOrOperation, _TensorLike + public partial class Tensor : DisposableObject, ITensorOrOperation, _TensorLike { - private IntPtr _handle; - private int _id; private Operation _op; @@ -48,8 +47,6 @@ namespace Tensorflow private int _value_index; public int value_index => _value_index; - private Status status = new Status(); - private TF_DataType _dtype = TF_DataType.DtInvalid; public TF_DataType dtype => _handle == IntPtr.Zero ? 
_dtype : c_api.TF_TensorType(_handle); @@ -76,6 +73,7 @@ namespace Tensorflow if (_handle == IntPtr.Zero) { + var status = new Status(); c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status); status.Check(); } @@ -90,6 +88,8 @@ namespace Tensorflow set { + var status = new Status(); + if (value == null) c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); else @@ -131,8 +131,11 @@ namespace Tensorflow { if (_handle == IntPtr.Zero) { + var status = new Status(); var output = _as_tf_output(); - return c_api.TF_GraphGetTensorNumDims(op.graph, output, status); + int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, status); + status.Check(); + return ndim; } else { @@ -184,6 +187,41 @@ namespace Tensorflow return data; } + public unsafe string[] StringData() + { + // + // TF_STRING tensors are encoded with a table of 8-byte offsets followed by TF_StringEncode-encoded bytes. + // [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes] + // + long size = 1; + foreach (var s in TensorShape.Dimensions) + size *= s; + + var buffer = new byte[size][]; + var src = c_api.TF_TensorData(_handle); + var srcLen = (IntPtr)(src.ToInt64() + (long)bytesize); + src += (int)(size * 8); + for (int i = 0; i < buffer.Length; i++) + { + using (var status = new Status()) + { + IntPtr dst = IntPtr.Zero; + UIntPtr dstLen = UIntPtr.Zero; + var read = c_api.TF_StringDecode((byte*)src, (UIntPtr)(srcLen.ToInt64() - src.ToInt64()), (byte**)&dst, &dstLen, status); + status.Check(true); + buffer[i] = new byte[(int)dstLen]; + Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); + src += (int)read; + } + } + + var _str = new string[buffer.Length]; + for (int i = 0; i < _str.Length; i++) + _str[i] = Encoding.UTF8.GetString(buffer[i]); + + return _str; + } + public Tensor MaybeMove() { var tensor = c_api.TF_TensorMaybeMove(_handle); @@ -262,7 +300,7 @@ namespace Tensorflow index += 1; } - return with(ops.name_scope(null, 
"strided_slice", new { begin, end, strides }), scope => + return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => { string name = scope; if (begin != null) @@ -311,7 +349,7 @@ namespace Tensorflow index += 1; } - return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => + return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => { string name = scope; if (begin != null) @@ -354,26 +392,12 @@ namespace Tensorflow return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; } - public void Dispose() + protected override void DisposeUnManagedState(IntPtr handle) { - IntPtr h=IntPtr.Zero; - lock (this) + if(handle != IntPtr.Zero) { - h = _handle; - _handle=IntPtr.Zero; + c_api.TF_DeleteTensor(handle); } - if (h != IntPtr.Zero) - c_api.TF_DeleteTensor(_handle); - status.Dispose(); - GC.SuppressFinalize(this); - } - - /// - /// Dispose the tensor when it gets garbage collected - /// - ~Tensor() - { - Dispose(); } public bool IsDisposed diff --git a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs index cf208ed2..6b20b34f 100644 --- a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs @@ -32,6 +32,12 @@ namespace Tensorflow [DllImport(TensorFlowLibName)] public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); + [DllImport(TensorFlowLibName)] + public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, ulong len); + + [DllImport(TensorFlowLibName)] + public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len); + /// /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. 
/// @@ -150,5 +156,8 @@ namespace Tensorflow /// [DllImport(TensorFlowLibName)] public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status); + + [DllImport(TensorFlowLibName)] + public static extern unsafe UIntPtr TF_StringDecode(byte* src, UIntPtr src_len, byte** dst, UIntPtr* dst_len, IntPtr status); } } diff --git a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs index c273f4d4..673e1307 100644 --- a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs +++ b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs @@ -81,7 +81,7 @@ namespace Tensorflow.Train var m = get_slot(var, "m"); var m_scaled_g_values = grad * (1 - beta1_t); var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking); - with(ops.control_dependencies(new[] { m_t }), delegate + tf_with(ops.control_dependencies(new[] { m_t }), delegate { m_t = scatter_add(m, indices, m_scaled_g_values); }); @@ -89,7 +89,7 @@ namespace Tensorflow.Train var v = get_slot(var, "v"); var v_scaled_g_values = (grad * grad) * (1 - beta2_t); var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking); - with(ops.control_dependencies(new[] { v_t }), delegate + tf_with(ops.control_dependencies(new[] { v_t }), delegate { v_t = scatter_add(v, indices, v_scaled_g_values); }); @@ -117,7 +117,7 @@ namespace Tensorflow.Train var operations = new List(); operations.AddRange(update_ops); - with(ops.control_dependencies(update_ops), delegate + tf_with(ops.control_dependencies(update_ops), delegate { var (beta1_power, beta2_power) = _get_beta_accumulators(); ops.colocate_with(beta1_power); diff --git a/src/TensorFlowNET.Core/Train/Optimizer.cs b/src/TensorFlowNET.Core/Train/Optimizer.cs index 3e1d86c5..e945b120 100644 --- a/src/TensorFlowNET.Core/Train/Optimizer.cs +++ b/src/TensorFlowNET.Core/Train/Optimizer.cs @@ -151,7 +151,7 @@ namespace Tensorflow _create_slots(var_list); var update_ops = new List(); - return 
with(ops.name_scope(name, Name), scope => + return tf_with(ops.name_scope(name, Name), scope => { name = scope; _prepare(); @@ -162,7 +162,7 @@ namespace Tensorflow continue; var scope_name = var.op.name; - with(ops.name_scope("update_" + scope_name), scope2 => + tf_with(ops.name_scope("update_" + scope_name), scope2 => { var op = processor.update_op(this, grad); update_ops.Add(op); @@ -176,7 +176,7 @@ namespace Tensorflow } else { - with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => + tf_with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => { ops.colocate_with(global_step); // TODO: port this if branch once ResourceVariable has been ported! diff --git a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs index 9deca740..95775a72 100644 --- a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs +++ b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs @@ -102,7 +102,7 @@ namespace Tensorflow Tensor save_tensor = null; Operation restore_op = null; - return with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => + return tf_with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => { name = scope; diff --git a/src/TensorFlowNET.Core/Train/SlotCreator.cs b/src/TensorFlowNET.Core/Train/SlotCreator.cs index aaa2c171..57582355 100644 --- a/src/TensorFlowNET.Core/Train/SlotCreator.cs +++ b/src/TensorFlowNET.Core/Train/SlotCreator.cs @@ -57,7 +57,7 @@ namespace Tensorflow.Train { var validate_shape = shape.is_fully_defined(); var prefix = primary.op.name; - return with(new variable_scope(string.Empty, prefix + "/" + name), delegate + return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate { return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype); }); diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs 
b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs index 8c2c1204..5adf5d9a 100644 --- a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs @@ -32,7 +32,7 @@ namespace Tensorflow private static Tensor op_helper(string default_name, RefVariable x, T y) { var tensor1 = x.value(); - return with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { + return tf_with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y"); return gen_math_ops.add(tensor1, tensor2, scope); }); diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.cs b/src/TensorFlowNET.Core/Variables/RefVariable.cs index 78a241c2..463ba2d0 100644 --- a/src/TensorFlowNET.Core/Variables/RefVariable.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.cs @@ -134,7 +134,7 @@ namespace Tensorflow ops.init_scope(); var values = init_from_fn ? new object[0] : new object[] { initial_value }; - with(ops.name_scope(name, "Variable", values), scope => + tf_with(ops.name_scope(name, "Variable", values), scope => { name = scope; if (init_from_fn) @@ -148,7 +148,7 @@ namespace Tensorflow List = new AttrValue.Types.ListValue() }; attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}")); - with(ops.name_scope("Initializer"), scope2 => + tf_with(ops.name_scope("Initializer"), scope2 => { _initial_value = (initial_value as Func)(); _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype); diff --git a/src/TensorFlowNET.Core/Variables/VariableScope.cs b/src/TensorFlowNET.Core/Variables/VariableScope.cs index d509d6b2..778e59b1 100644 --- a/src/TensorFlowNET.Core/Variables/VariableScope.cs +++ b/src/TensorFlowNET.Core/Variables/VariableScope.cs @@ -56,7 +56,7 @@ namespace Tensorflow VariableAggregation aggregation= VariableAggregation.None) { string full_name = !string.IsNullOrEmpty(this.name) ? 
this.name + "/" + name : name; - return with(ops.name_scope(null), scope => + return tf_with(ops.name_scope(null), scope => { if (dtype == TF_DataType.DtInvalid) dtype = _dtype; diff --git a/src/TensorFlowNET.Core/ops.py.cs b/src/TensorFlowNET.Core/ops.py.cs index 8f7fce29..979e132e 100644 --- a/src/TensorFlowNET.Core/ops.py.cs +++ b/src/TensorFlowNET.Core/ops.py.cs @@ -295,7 +295,7 @@ namespace Tensorflow // inner_device_stack = default_graph._device_function_stack // var outer_context = default_graph.as_default; - with(ops.control_dependencies(null), delegate + tf_with(ops.control_dependencies(null), delegate { var outer_graph = get_default_graph(); // outer_device_stack = None diff --git a/tensorflowlib/README.md b/tensorflowlib/README.md index 63cba815..318e5dc9 100644 --- a/tensorflowlib/README.md +++ b/tensorflowlib/README.md @@ -16,6 +16,8 @@ Here are some pre-built TensorFlow binaries you can use for each platform: - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip + + ### Run in Linux `Install-Package TensorFlow.NET` @@ -31,10 +33,21 @@ sudo apt install libgdiplus More information about [System.Drawing on Linux](). + + ### Run in Mac OS -### GPU Tensorflow for windows -Before running verify you installed CUDA and cuDNN + + +### Tensorflow GPU for Windows + +Before running verify you installed CUDA and cuDNN (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding cuda version is compatible. 
+ +```powershell +PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU +``` + + ### Build from source for Windows diff --git a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs index 44e5c7a7..38c650a3 100644 --- a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs +++ b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs @@ -77,7 +77,7 @@ let run()= let init = tf.global_variables_initializer() - Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) -> + Tensorflow.Python.``tf_with``(tf.Session(), fun (sess:Session) -> sess.run(init) |> ignore // Loop over epochs for epoch in [0..training_epochs] do diff --git a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs index c0ca95b3..7bacb28d 100644 --- a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs +++ b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs @@ -18,7 +18,7 @@ using NumSharp; using System; using System.Diagnostics; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; namespace TensorFlowNET.Examples @@ -39,7 +39,7 @@ namespace TensorFlowNET.Examples public int? 
test_size = null; public int batch_size = 1024; // The number of samples per batch - Datasets mnist; + Datasets mnist; NDArray full_data_x; int num_steps = 20; // Total steps to train int k = 25; // The number of clusters @@ -52,29 +52,41 @@ namespace TensorFlowNET.Examples { PrepareData(); var graph = ImportGraph(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { Train(sess); - }); + } return accuray_test > 0.70; } public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size:validation_size, test_size:test_size); - full_data_x = mnist.train.data; + var loader = new MnistModelLoader(); + + var setting = new ModelLoadSetting + { + TrainDir = ".resources/mnist", + OneHot = true, + TrainSize = train_size, + ValidationSize = validation_size, + TestSize = test_size + }; + + mnist = loader.LoadAsync(setting).Result; + + full_data_x = mnist.Train.Data; // download graph meta data string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta"; - Web.Download(url, "graph", "kmeans.meta"); + loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait(); } public Graph ImportGraph() { var graph = tf.Graph().as_default(); - tf.train.import_meta_graph("graph/kmeans.meta"); + tf.train.import_meta_graph(".resources/graph/kmeans.meta"); return graph; } @@ -132,7 +144,7 @@ namespace TensorFlowNET.Examples sw.Start(); foreach (var i in range(idx.Length)) { - var x = mnist.train.labels[i]; + var x = mnist.Train.Labels[i]; counts[idx[i]] += x; } @@ -153,7 +165,7 @@ namespace TensorFlowNET.Examples var accuracy_op = tf.reduce_mean(cast); // Test Model - var (test_x, test_y) = (mnist.test.data, mnist.test.labels); + var (test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels); result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y)); accuray_test = result; print($"Test Accuracy: {accuray_test}"); diff --git 
a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs index f7baef1d..0098404d 100644 --- a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs +++ b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs @@ -71,7 +71,7 @@ namespace TensorFlowNET.Examples var init = tf.global_variables_initializer(); // Start training - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -114,7 +114,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Absolute mean square loss difference: {diff}"); return diff < 0.01; - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs index 185dd1fe..ca691d40 100644 --- a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs +++ b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs @@ -19,7 +19,7 @@ using System; using System.Diagnostics; using System.IO; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; namespace TensorFlowNET.Examples @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples private float learning_rate = 0.01f; private int display_step = 1; - Datasets mnist; + Datasets mnist; public bool Run() { @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples var sw = new Stopwatch(); - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -84,11 +84,11 @@ namespace TensorFlowNET.Examples sw.Start(); var avg_cost = 0.0f; - var total_batch = mnist.train.num_examples / batch_size; + var total_batch = mnist.Train.NumOfExamples / batch_size; // Loop over all batches foreach (var i in range(total_batch)) { - var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size); + var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(batch_size); // Run 
optimization op (backprop) and cost op (to get loss value) var result = sess.run(new object[] { optimizer, cost }, new FeedItem(x, batch_xs), @@ -115,32 +115,32 @@ namespace TensorFlowNET.Examples var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)); // Calculate accuracy var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); - float acc = accuracy.eval(new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); + float acc = accuracy.eval(new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); print($"Accuracy: {acc.ToString("F4")}"); return acc > 0.9; - }); + } } public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size: validation_size, test_size: test_size); + mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size).Result; } public void SaveModel(Session sess) { var saver = tf.train.Saver(); - var save_path = saver.save(sess, "logistic_regression/model.ckpt"); - tf.train.write_graph(sess.graph, "logistic_regression", "model.pbtxt", as_text: true); + var save_path = saver.save(sess, ".resources/logistic_regression/model.ckpt"); + tf.train.write_graph(sess.graph, ".resources/logistic_regression", "model.pbtxt", as_text: true); - FreezeGraph.freeze_graph(input_graph: "logistic_regression/model.pbtxt", + FreezeGraph.freeze_graph(input_graph: ".resources/logistic_regression/model.pbtxt", input_saver: "", input_binary: false, - input_checkpoint: "logistic_regression/model.ckpt", + input_checkpoint: ".resources/logistic_regression/model.ckpt", output_node_names: "Softmax", restore_op_name: "save/restore_all", filename_tensor_name: "save/Const:0", - output_graph: "logistic_regression/model.pb", + output_graph: ".resources/logistic_regression/model.pb", clear_devices: true, initializer_nodes: ""); } @@ -148,7 +148,7 @@ namespace TensorFlowNET.Examples public 
void Predict(Session sess) { var graph = new Graph().as_default(); - graph.Import(Path.Join("logistic_regression", "model.pb")); + graph.Import(Path.Join(".resources/logistic_regression", "model.pb")); // restoring the model // var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta"); @@ -159,7 +159,7 @@ namespace TensorFlowNET.Examples var input = x.outputs[0]; // predict - var (batch_xs, batch_ys) = mnist.train.next_batch(10); + var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(10); var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)])); if (results.argmax() == (batch_ys[0] as NDArray).argmax()) diff --git a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs index d77c6902..358a3301 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs @@ -48,14 +48,14 @@ namespace TensorFlowNET.Examples float y_max = X.amax(0).Data(1) + 0.5f; var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30)); - with(tf.Session(), sess => + using (var sess = tf.Session()) { //var samples = np.vstack(xx.ravel(), yy.ravel()); //samples = np.transpose(samples); var array = np.Load(Path.Join("nb", "nb_example.npy")); var samples = np.array(array).astype(np.float32); var Z = sess.run(predict(samples)); - }); + } return true; } diff --git a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs index 86ecd281..8f761d00 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs @@ -17,7 +17,7 @@ using NumSharp; using System; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; namespace TensorFlowNET.Examples @@ -31,7 +31,7 @@ namespace 
TensorFlowNET.Examples { public bool Enabled { get; set; } = true; public string Name => "Nearest Neighbor"; - Datasets mnist; + Datasets mnist; NDArray Xtr, Ytr, Xte, Yte; public int? TrainSize = null; public int ValidationSize = 5000; @@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples float accuracy = 0f; // Initialize the variables (i.e. assign their default value) var init = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the initializer sess.run(init); @@ -77,17 +77,17 @@ namespace TensorFlowNET.Examples } print($"Accuracy: {accuracy}"); - }); + } return accuracy > 0.8; } public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: TrainSize, validation_size:ValidationSize, test_size:TestSize); + mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize).Result; // In this example, we limit mnist data - (Xtr, Ytr) = mnist.train.next_batch(TrainSize==null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) - (Xte, Yte) = mnist.test.next_batch(TestSize==null ? 200 : TestSize.Value / 100); // 200 for testing + (Xtr, Ytr) = mnist.Train.GetNextBatch(TrainSize == null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) + (Xte, Yte) = mnist.Test.GetNextBatch(TestSize == null ? 
200 : TestSize.Value / 100); // 200 for testing } public Graph ImportGraph() diff --git a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs index a4a2901c..12687e3f 100644 --- a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs +++ b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs @@ -90,7 +90,7 @@ namespace TensorFlowNET.Examples var init = tf.global_variables_initializer(); float loss_value = 0; // Start tf session - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -110,7 +110,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } @@ -128,7 +128,7 @@ namespace TensorFlowNET.Examples float loss_value = 0; // Start tf session - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { sess.run(init); var step = 0; @@ -143,7 +143,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"Step {step} loss: {loss_value}"); } Console.WriteLine($"Final loss: {loss_value}"); - }); + } return loss_value; } diff --git a/test/TensorFlowNET.Examples/BasicOperations.cs b/test/TensorFlowNET.Examples/BasicOperations.cs index 5fd52e2d..c7314abe 100644 --- a/test/TensorFlowNET.Examples/BasicOperations.cs +++ b/test/TensorFlowNET.Examples/BasicOperations.cs @@ -134,7 +134,7 @@ namespace TensorFlowNET.Examples 3, 3, 2)); var batchMul = tf.batch_matmul(firstTensor, secondTensor); var checkTensor = np.array(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0); - return with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(batchMul); Console.WriteLine(result.ToString()); @@ -152,7 +152,7 @@ namespace TensorFlowNET.Examples // [24, 0]]]) return np.reshape(result, 18) .array_equal(checkTensor); - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/HelloWorld.cs 
b/test/TensorFlowNET.Examples/HelloWorld.cs index e9c91336..52e47e3d 100644 --- a/test/TensorFlowNET.Examples/HelloWorld.cs +++ b/test/TensorFlowNET.Examples/HelloWorld.cs @@ -25,13 +25,13 @@ namespace TensorFlowNET.Examples var hello = tf.constant(str); // Start tf session - return with(tf.Session(), sess => + using (var sess = tf.Session()) { // Run the op var result = sess.run(hello); Console.WriteLine(result.ToString()); return result.ToString().Equals(str); - }); + } } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs new file mode 100644 index 00000000..a77a5b00 --- /dev/null +++ b/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs @@ -0,0 +1,74 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+******************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow; +using TensorFlowDatasets; +using static Tensorflow.Python; + +namespace TensorFlowNET.Examples +{ + /// + /// https://www.tensorflow.org/tutorials/images/deep_cnn + /// + public class CIFAR10_CNN : IExample + { + public bool Enabled { get; set; } = true; + public bool IsImportingGraph { get; set; } = false; + + public string Name => "CIFAR-10 CNN"; + + public bool Run() + { + PrepareData(); + + return true; + } + + public Graph BuildGraph() + { + throw new NotImplementedException(); + } + + public Graph ImportGraph() + { + throw new NotImplementedException(); + } + + public void Predict(Session sess) + { + throw new NotImplementedException(); + } + + public void PrepareData() + { + var tfds = new DatasetBuilder(); + tfds.download_and_prepare(); + } + + public void Test(Session sess) + { + throw new NotImplementedException(); + } + + public void Train(Session sess) + { + throw new NotImplementedException(); + } + } +} diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs index 2dc355c4..4b882a1a 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs @@ -16,11 +16,12 @@ using NumSharp; using System; +using System.Diagnostics; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; -namespace TensorFlowNET.Examples.ImageProcess +namespace TensorFlowNET.Examples { /// /// Convolutional Neural Network classifier for Hand Written Digits @@ -45,7 +46,7 @@ namespace TensorFlowNET.Examples.ImageProcess int epochs = 5; // accuracy > 98% int batch_size = 100; float learning_rate = 0.001f; - Datasets mnist; + Datasets mnist; // Network configuration // 1st 
Convolutional Layer @@ -78,11 +79,11 @@ namespace TensorFlowNET.Examples.ImageProcess PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + } return loss_test < 0.05 && accuracy_test > 0.98; } @@ -91,7 +92,7 @@ namespace TensorFlowNET.Examples.ImageProcess { var graph = new Graph().as_default(); - with(tf.name_scope("Input"), delegate + tf_with(tf.name_scope("Input"), delegate { // Placeholders for inputs (x) and outputs(y) x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X"); @@ -106,25 +107,25 @@ namespace TensorFlowNET.Examples.ImageProcess var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true); var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false); - with(tf.variable_scope("Train"), delegate + tf_with(tf.variable_scope("Train"), delegate { - with(tf.variable_scope("Loss"), delegate + tf_with(tf.variable_scope("Loss"), delegate { loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss"); }); - with(tf.variable_scope("Optimizer"), delegate + tf_with(tf.variable_scope("Optimizer"), delegate { optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss); }); - with(tf.variable_scope("Accuracy"), delegate + tf_with(tf.variable_scope("Accuracy"), delegate { var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred"); accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy"); }); - with(tf.variable_scope("Prediction"), delegate + tf_with(tf.variable_scope("Prediction"), delegate { cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions"); }); @@ -144,6 +145,8 @@ namespace TensorFlowNET.Examples.ImageProcess float loss_val = 100.0f; float accuracy_val = 0f; + var sw = new Stopwatch(); + sw.Start(); foreach (var epoch in range(epochs)) { print($"Training epoch: {epoch + 1}"); @@ 
-165,7 +168,8 @@ namespace TensorFlowNET.Examples.ImageProcess var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); loss_val = result[0]; accuracy_val = result[1]; - print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}"); + print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms"); + sw.Restart(); } } @@ -200,7 +204,7 @@ namespace TensorFlowNET.Examples.ImageProcess /// The output array private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name) { - return with(tf.variable_scope(name), delegate { + return tf_with(tf.variable_scope(name), delegate { var num_in_channel = x.shape[x.NDims - 1]; var shape = new[] { filter_size, filter_size, num_in_channel, num_filters }; @@ -240,7 +244,7 @@ namespace TensorFlowNET.Examples.ImageProcess /// flattened array private Tensor flatten_layer(Tensor layer) { - return with(tf.variable_scope("Flatten_layer"), delegate + return tf_with(tf.variable_scope("Flatten_layer"), delegate { var layer_shape = layer.TensorShape; var num_features = layer_shape[new Slice(1, 4)].Size; @@ -289,7 +293,7 @@ namespace TensorFlowNET.Examples.ImageProcess /// The output array private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true) { - return with(tf.variable_scope(name), delegate + return tf_with(tf.variable_scope(name), delegate { var in_dim = x.shape[1]; @@ -306,14 +310,14 @@ namespace TensorFlowNET.Examples.ImageProcess public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true); - (x_train, y_train) = Reformat(mnist.train.data, mnist.train.labels); - (x_valid, y_valid) = Reformat(mnist.validation.data, mnist.validation.labels); - (x_test, y_test) = Reformat(mnist.test.data, mnist.test.labels); + mnist = MnistModelLoader.LoadAsync(".resources/mnist", 
oneHot: true).Result; + (x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels); + (x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels); + (x_test, y_test) = Reformat(mnist.Test.Data, mnist.Test.Labels); print("Size of:"); - print($"- Training-set:\t\t{len(mnist.train.data)}"); - print($"- Validation-set:\t{len(mnist.validation.data)}"); + print($"- Training-set:\t\t{len(mnist.Train.Data)}"); + print($"- Validation-set:\t{len(mnist.Validation.Data)}"); } /// diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs index 09fdc818..02feecb9 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs @@ -17,10 +17,10 @@ using NumSharp; using System; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; -namespace TensorFlowNET.Examples.ImageProcess +namespace TensorFlowNET.Examples { /// /// Neural Network classifier for Hand Written Digits @@ -44,7 +44,7 @@ namespace TensorFlowNET.Examples.ImageProcess int batch_size = 100; float learning_rate = 0.001f; int h1 = 200; // number of nodes in the 1st hidden layer - Datasets mnist; + Datasets mnist; Tensor x, y; Tensor loss, accuracy; @@ -59,11 +59,11 @@ namespace TensorFlowNET.Examples.ImageProcess PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + }; return loss_test < 0.09 && accuracy_test > 0.95; } @@ -121,13 +121,13 @@ namespace TensorFlowNET.Examples.ImageProcess public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true); + mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; } public void Train(Session sess) { // Number of training iterations in each epoch - var num_tr_iter = mnist.train.labels.len / batch_size; + var 
num_tr_iter = mnist.Train.Labels.len / batch_size; var init = tf.global_variables_initializer(); sess.run(init); @@ -139,13 +139,13 @@ namespace TensorFlowNET.Examples.ImageProcess { print($"Training epoch: {epoch + 1}"); // Randomly shuffle the training data at the beginning of each epoch - var (x_train, y_train) = randomize(mnist.train.data, mnist.train.labels); + var (x_train, y_train) = mnist.Randomize(mnist.Train.Data, mnist.Train.Labels); foreach (var iteration in range(num_tr_iter)) { var start = iteration * batch_size; var end = (iteration + 1) * batch_size; - var (x_batch, y_batch) = get_next_batch(x_train, y_train, start, end); + var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end); // Run optimization op (backprop) sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); @@ -161,7 +161,8 @@ namespace TensorFlowNET.Examples.ImageProcess } // Run validation after every epoch - var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.data), new FeedItem(y, mnist.validation.labels)); + var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Validation.Data), new FeedItem(y, mnist.Validation.Labels)); + loss_val = results1[0]; accuracy_val = results1[1]; print("---------------------------------------------------------"); @@ -172,35 +173,12 @@ namespace TensorFlowNET.Examples.ImageProcess public void Test(Session sess) { - var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); + var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); loss_test = result[0]; accuracy_test = result[1]; print("---------------------------------------------------------"); print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}"); print("---------------------------------------------------------"); } - - private (NDArray, NDArray) 
randomize(NDArray x, NDArray y) - { - var perm = np.random.permutation(y.shape[0]); - - np.random.shuffle(perm); - return (mnist.train.data[perm], mnist.train.labels[perm]); - } - - /// - /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) - /// - /// - /// - /// - /// - /// - private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end) - { - var x_batch = x[$"{start}:{end}"]; - var y_batch = y[$"{start}:{end}"]; - return (x_batch, y_batch); - } } } diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs index d51ca9ad..b91a19ca 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs @@ -17,10 +17,10 @@ using NumSharp; using System; using Tensorflow; -using TensorFlowNET.Examples.Utility; +using Tensorflow.Hub; using static Tensorflow.Python; -namespace TensorFlowNET.Examples.ImageProcess +namespace TensorFlowNET.Examples { /// /// Recurrent Neural Network for handwritten digits MNIST. 
@@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples.ImageProcess int n_inputs = 28; int n_outputs = 10; - Datasets mnist; + Datasets mnist; Tensor x, y; Tensor loss, accuracy, cls_prediction; @@ -64,11 +64,11 @@ namespace TensorFlowNET.Examples.ImageProcess PrepareData(); BuildGraph(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { Train(sess); Test(sess); - }); + } return loss_test < 0.09 && accuracy_test > 0.95; } @@ -143,15 +143,15 @@ namespace TensorFlowNET.Examples.ImageProcess public void PrepareData() { - mnist = MNIST.read_data_sets("mnist", one_hot: true); - (x_train, y_train) = (mnist.train.data, mnist.train.labels); - (x_valid, y_valid) = (mnist.validation.data, mnist.validation.labels); - (x_test, y_test) = (mnist.test.data, mnist.test.labels); + mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; + (x_train, y_train) = (mnist.Train.Data, mnist.Train.Labels); + (x_valid, y_valid) = (mnist.Validation.Data, mnist.Validation.Labels); + (x_test, y_test) = (mnist.Test.Data, mnist.Test.Labels); print("Size of:"); - print($"- Training-set:\t\t{len(mnist.train.data)}"); - print($"- Validation-set:\t{len(mnist.validation.data)}"); - print($"- Test-set:\t\t{len(mnist.test.data)}"); + print($"- Training-set:\t\t{len(mnist.Train.Data)}"); + print($"- Validation-set:\t{len(mnist.Validation.Data)}"); + print($"- Test-set:\t\t{len(mnist.Test.Data)}"); } public Graph ImportGraph() => throw new NotImplementedException(); diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs index c43c853a..8eed577b 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs @@ -4,7 +4,7 @@ using Tensorflow; using TensorFlowNET.Examples.Utility; using static Tensorflow.Python; -namespace TensorFlowNET.Examples.ImageProcess +namespace 
TensorFlowNET.Examples { /// /// This example removes the background from an input image. @@ -32,11 +32,11 @@ namespace TensorFlowNET.Examples.ImageProcess Tensor output = graph.OperationByName("SemanticPredictions"); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { // Runs inference on a single image. sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]")); - }); + } return false; } diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs index a0f61029..efcb0b73 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples var result_labels = new List(); var sw = new Stopwatch(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { foreach (var nd in file_ndarrays) { @@ -58,7 +58,7 @@ namespace TensorFlowNET.Examples Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan); result_labels.Add(labels[idx]); } - }); + } return result_labels.Contains("military uniform"); } @@ -69,19 +69,19 @@ namespace TensorFlowNET.Examples int input_mean = 117, int input_std = 1) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); - var cast = tf.cast(decodeJpeg, tf.float32); - var dims_expander = tf.expand_dims(cast, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + 
+ var file_reader = tf.read_file(file_name, "file_reader"); + var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); + var cast = tf.cast(decodeJpeg, tf.float32); + var dims_expander = tf.expand_dims(cast, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs index 2f6b9ab1..f51833d2 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs @@ -45,9 +45,12 @@ namespace TensorFlowNET.Examples var input_operation = graph.get_operation_by_name(input_name); var output_operation = graph.get_operation_by_name(output_name); - var results = with(tf.Session(graph), - sess => sess.run(output_operation.outputs[0], - new FeedItem(input_operation.outputs[0], nd))); + NDArray results; + using (var sess = tf.Session(graph)) + { + results = sess.run(output_operation.outputs[0], + new FeedItem(input_operation.outputs[0], nd)); + } results = np.squeeze(results); @@ -69,19 +72,19 @@ namespace TensorFlowNET.Examples int input_mean = 0, int input_std = 255) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); - var caster = tf.cast(image_reader, tf.float32); - var dims_expander = tf.expand_dims(caster, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = 
tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); + var caster = tf.cast(image_reader, tf.float32); + var dims_expander = tf.expand_dims(caster, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void PrepareData() diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs index f40be91f..8f8d0dd9 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs @@ -51,7 +51,8 @@ namespace TensorFlowNET.Examples var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => Predict(sess)); + using (var sess = tf.Session(graph)) + Predict(sess); return true; } @@ -101,14 +102,15 @@ namespace TensorFlowNET.Examples private NDArray ReadTensorFromImageFile(string file_name) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); - var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); - var dims_expander = tf.expand_dims(casted, 0); - return with(tf.Session(graph), sess => sess.run(dims_expander)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); + var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); + var dims_expander = tf.expand_dims(casted, 0); + + using (var sess = tf.Session(graph)) + return sess.run(dims_expander); } private void buildOutputImage(NDArray[] resultArr) diff --git a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs index 4d3a858f..becd9f7e 100644 --- a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs +++ b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs @@ -25,7 +25,7 @@ using Tensorflow; using TensorFlowNET.Examples.Utility; using static Tensorflow.Python; -namespace TensorFlowNET.Examples.ImageProcess +namespace TensorFlowNET.Examples { /// /// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet @@ -83,19 +83,19 @@ namespace TensorFlowNET.Examples.ImageProcess #region For debug purpose // predict images - Predict(null); + // Predict(null); // load saved pb and test new images. - Test(null); + // Test(null); #endregion var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { Train(sess); - }); + } return test_accuracy > 0.75f; } @@ -141,20 +141,18 @@ namespace TensorFlowNET.Examples.ImageProcess Tensor evaluation_step = null; Tensor prediction = null; - with(eval_graph.as_default(), graph => - { - // Add the new layer for exporting. - var (_, _, bottleneck_input, ground_truth_input, final_tensor) = - add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, - wants_quantization, is_training: false); + var graph = eval_graph.as_default(); + // Add the new layer for exporting. + var (_, _, bottleneck_input, ground_truth_input, final_tensor) = + add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, + wants_quantization, is_training: false); - // Now we need to restore the values from the training graph to the eval - // graph. - tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); + // Now we need to restore the values from the training graph to the eval + // graph. + tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); - (evaluation_step, prediction) = add_evaluation_step(final_tensor, - ground_truth_input); - }); + (evaluation_step, prediction) = add_evaluation_step(final_tensor, + ground_truth_input); return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, evaluation_step, prediction); @@ -180,7 +178,7 @@ namespace TensorFlowNET.Examples.ImageProcess Tensor bottleneck_tensor, bool quantize_layer, bool is_training) { var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.Dimensions[0], bottleneck_tensor.TensorShape.Dimensions[1]); - with(tf.name_scope("input"), scope => + tf_with(tf.name_scope("input"), scope => { bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, @@ -193,10 +191,10 @@ namespace TensorFlowNET.Examples.ImageProcess // Organizing the following ops so they are easier to see in TensorBoard. 
string layer_name = "final_retrain_ops"; Tensor logits = null; - with(tf.name_scope(layer_name), scope => + tf_with(tf.name_scope(layer_name), scope => { RefVariable layer_weights = null; - with(tf.name_scope("weights"), delegate + tf_with(tf.name_scope("weights"), delegate { var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f); layer_weights = tf.Variable(initial_value, name: "final_weights"); @@ -204,13 +202,13 @@ namespace TensorFlowNET.Examples.ImageProcess }); RefVariable layer_biases = null; - with(tf.name_scope("biases"), delegate + tf_with(tf.name_scope("biases"), delegate { layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases"); variable_summaries(layer_biases); }); - with(tf.name_scope("Wx_plus_b"), delegate + tf_with(tf.name_scope("Wx_plus_b"), delegate { logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases; tf.summary.histogram("pre_activations", logits); @@ -239,7 +237,7 @@ namespace TensorFlowNET.Examples.ImageProcess return (null, null, bottleneck_input, ground_truth_input, final_tensor); Tensor cross_entropy_mean = null; - with(tf.name_scope("cross_entropy"), delegate + tf_with(tf.name_scope("cross_entropy"), delegate { cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( labels: ground_truth_input, logits: logits); @@ -247,7 +245,7 @@ namespace TensorFlowNET.Examples.ImageProcess tf.summary.scalar("cross_entropy", cross_entropy_mean); - with(tf.name_scope("train"), delegate + tf_with(tf.name_scope("train"), delegate { var optimizer = tf.train.GradientDescentOptimizer(learning_rate); train_step = optimizer.minimize(cross_entropy_mean); @@ -259,12 +257,12 @@ namespace TensorFlowNET.Examples.ImageProcess private void variable_summaries(RefVariable var) { - with(tf.name_scope("summaries"), delegate + tf_with(tf.name_scope("summaries"), delegate { var mean = tf.reduce_mean(var); tf.summary.scalar("mean", mean); Tensor stddev = null; - 
with(tf.name_scope("stddev"), delegate + tf_with(tf.name_scope("stddev"), delegate { stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))); }); @@ -279,7 +277,7 @@ namespace TensorFlowNET.Examples.ImageProcess { var (height, width) = (299, 299); - return with(tf.Graph().as_default(), graph => + return tf_with(tf.Graph().as_default(), graph => { tf.train.import_meta_graph("graph/InceptionV3.meta"); Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); @@ -350,15 +348,15 @@ namespace TensorFlowNET.Examples.ImageProcess { Tensor evaluation_step = null, correct_prediction = null, prediction = null; - with(tf.name_scope("accuracy"), scope => + tf_with(tf.name_scope("accuracy"), scope => { - with(tf.name_scope("correct_prediction"), delegate + tf_with(tf.name_scope("correct_prediction"), delegate { prediction = tf.argmax(result_tensor, 1); correct_prediction = tf.equal(prediction, ground_truth_tensor); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); }); @@ -596,7 +594,7 @@ namespace TensorFlowNET.Examples.ImageProcess create_module_graph(); // Add the new layer that we'll be training. 
- with(graph.as_default(), delegate + tf_with(graph.as_default(), delegate { (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_final_retrain_ops( @@ -745,13 +743,13 @@ namespace TensorFlowNET.Examples.ImageProcess Tensor input = graph.OperationByName("Placeholder"); Tensor output = graph.OperationByName("final_result"); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var result = sess.run(output, new FeedItem(input, fileBytes)); var prob = np.squeeze(result); var idx = np.argmax(prob); print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); - }); + } } private NDArray ReadTensorFromImageFile(string file_name, @@ -760,19 +758,19 @@ namespace TensorFlowNET.Examples.ImageProcess int input_mean = 0, int input_std = 255) { - return with(tf.Graph().as_default(), graph => - { - var file_reader = tf.read_file(file_name, "file_reader"); - var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); - var caster = tf.cast(image_reader, tf.float32); - var dims_expander = tf.expand_dims(caster, 0); - var resize = tf.constant(new int[] { input_height, input_width }); - var bilinear = tf.image.resize_bilinear(dims_expander, resize); - var sub = tf.subtract(bilinear, new float[] { input_mean }); - var normalized = tf.divide(sub, new float[] { input_std }); - - return with(tf.Session(graph), sess => sess.run(normalized)); - }); + var graph = tf.Graph().as_default(); + + var file_reader = tf.read_file(file_name, "file_reader"); + var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); + var caster = tf.cast(image_reader, tf.float32); + var dims_expander = tf.expand_dims(caster, 0); + var resize = tf.constant(new int[] { input_height, input_width }); + var bilinear = tf.image.resize_bilinear(dims_expander, resize); + var sub = tf.subtract(bilinear, new float[] { input_mean }); + var normalized = tf.divide(sub, new float[] { input_std }); + + 
using (var sess = tf.Session(graph)) + return sess.run(normalized); } public void Test(Session sess_) @@ -783,7 +781,7 @@ namespace TensorFlowNET.Examples.ImageProcess var graph = Graph.ImportFromPB(output_graph); var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); - with(tf.Session(graph), sess => + tf_with(tf.Session(graph), sess => { (test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, diff --git a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj new file mode 100644 index 00000000..1bd3d530 --- /dev/null +++ b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj @@ -0,0 +1,24 @@ + + + + Exe + netcoreapp2.2 + false + + + + + + + + + + + + + + + + + + diff --git a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj index 149bd549..f4e2340a 100644 --- a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj +++ b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj @@ -16,7 +16,9 @@ + + diff --git a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs index a2afe43d..9ec17f12 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs @@ -64,7 +64,9 @@ namespace TensorFlowNET.Examples { PrepareData(); var graph = IsImportingGraph ? 
ImportGraph() : BuildGraph(); - with(tf.Session(graph), sess => Train(sess)); + + using (var sess = tf.Session(graph)) + Train(sess); return max_accuracy > 0.9; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs index 2ec703ba..7e324c56 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs @@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples.Text.NER var init = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init); @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples.Text.NER loss_value = run_epoch(sess, train, dev, epoch); print($"train loss: {loss_value}"); } - }); + } return loss_value < 0.1; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs index ce6628e3..8ed87748 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs @@ -66,7 +66,7 @@ namespace TensorFlowNET.Examples // Initialize the variables (i.e. 
assign their default value) var init = tf.global_variables_initializer(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { // Run the initializer sess.run(init); @@ -112,7 +112,7 @@ namespace TensorFlowNET.Examples } } } - }); + } return average_loss < 100; } diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs index 390c6040..75308b8c 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs @@ -26,7 +26,7 @@ namespace TensorFlowNET.Examples.Text Tensor conv3 = null, conv4 = null, conv5 = null, conv6 = null; Tensor h_pool = null; - with(tf.name_scope("conv-maxpool-1"), delegate + tf_with(tf.name_scope("conv-maxpool-1"), delegate { var conv1 = tf.layers.conv2d(x_expanded, filters: num_filters, @@ -40,7 +40,7 @@ namespace TensorFlowNET.Examples.Text pool1 = tf.transpose(pool1, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-maxpool-2"), delegate + tf_with(tf.name_scope("conv-maxpool-2"), delegate { var conv2 = tf.layers.conv2d(pool1, filters: num_filters, @@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples.Text pool2 = tf.transpose(pool2, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-3"), delegate + tf_with(tf.name_scope("conv-3"), delegate { conv3 = tf.layers.conv2d(pool2, filters: num_filters, @@ -64,7 +64,7 @@ namespace TensorFlowNET.Examples.Text conv3 = tf.transpose(conv3, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-4"), delegate + tf_with(tf.name_scope("conv-4"), delegate { conv4 = tf.layers.conv2d(conv3, filters: num_filters, @@ -74,7 +74,7 @@ namespace TensorFlowNET.Examples.Text conv4 = tf.transpose(conv4, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-5"), delegate + tf_with(tf.name_scope("conv-5"), delegate { conv5 = tf.layers.conv2d(conv4, filters: num_filters, @@ -84,7 +84,7 @@ namespace TensorFlowNET.Examples.Text 
conv5 = tf.transpose(conv5, new[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-maxpool-6"), delegate + tf_with(tf.name_scope("conv-maxpool-6"), delegate { conv6 = tf.layers.conv2d(conv5, filters: num_filters, @@ -105,7 +105,7 @@ namespace TensorFlowNET.Examples.Text Tensor logits = null; Tensor predictions = null; - with(tf.name_scope("fc-1"), delegate + tf_with(tf.name_scope("fc-1"), delegate { fc1_out = tf.layers.dense(h_pool, 1024, @@ -113,7 +113,7 @@ namespace TensorFlowNET.Examples.Text kernel_initializer: kernel_initializer); }); - with(tf.name_scope("fc-2"), delegate + tf_with(tf.name_scope("fc-2"), delegate { fc2_out = tf.layers.dense(fc1_out, 1024, @@ -121,7 +121,7 @@ namespace TensorFlowNET.Examples.Text kernel_initializer: kernel_initializer); }); - with(tf.name_scope("fc-3"), delegate + tf_with(tf.name_scope("fc-3"), delegate { logits = tf.layers.dense(fc2_out, num_class, @@ -129,7 +129,7 @@ namespace TensorFlowNET.Examples.Text predictions = tf.argmax(logits, -1, output_type: tf.int32); }); - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var y_one_hot = tf.one_hot(y, num_class); var loss = tf.reduce_mean( @@ -137,7 +137,7 @@ namespace TensorFlowNET.Examples.Text var optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step: global_step); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { var correct_predictions = tf.equal(predictions, y); var accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name: "accuracy"); diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs index c71376f8..8087a2b2 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs @@ -41,7 +41,7 @@ namespace TensorFlowNET.Examples.Text global_step = tf.Variable(0, trainable: false); // Embedding Layer 
- with(tf.name_scope("embedding"), delegate + tf_with(tf.name_scope("embedding"), delegate { var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f); embeddings = tf.get_variable("embeddings", initializer: init_embeddings); @@ -59,7 +59,7 @@ namespace TensorFlowNET.Examples.Text Tensor fc2_out = null; // First Convolution Layer - with(tf.variable_scope("conv-0"), delegate + tf_with(tf.variable_scope("conv-0"), delegate { conv0 = tf.layers.conv2d(x_expanded, filters: num_filters[0], @@ -70,25 +70,25 @@ namespace TensorFlowNET.Examples.Text conv0 = tf.transpose(conv0, new int[] { 0, 1, 3, 2 }); }); - with(tf.name_scope("conv-block-1"), delegate { + tf_with(tf.name_scope("conv-block-1"), delegate { conv1 = conv_block(conv0, 1); }); - with(tf.name_scope("conv-block-2"), delegate { + tf_with(tf.name_scope("conv-block-2"), delegate { conv2 = conv_block(conv1, 2); }); - with(tf.name_scope("conv-block-3"), delegate { + tf_with(tf.name_scope("conv-block-3"), delegate { conv3 = conv_block(conv2, 3); }); - with(tf.name_scope("conv-block-4"), delegate + tf_with(tf.name_scope("conv-block-4"), delegate { conv4 = conv_block(conv3, 4, max_pool: false); }); // ============= k-max Pooling ============= - with(tf.name_scope("k-max-pooling"), delegate + tf_with(tf.name_scope("k-max-pooling"), delegate { var h = tf.transpose(tf.squeeze(conv4, new int[] { -1 }), new int[] { 0, 2, 1 }); var top_k = tf.nn.top_k(h, k: 8, sorted: false)[0]; @@ -96,30 +96,30 @@ namespace TensorFlowNET.Examples.Text }); // ============= Fully Connected Layers ============= - with(tf.name_scope("fc-1"), scope => + tf_with(tf.name_scope("fc-1"), scope => { fc1_out = tf.layers.dense(h_flat, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer); }); - with(tf.name_scope("fc-2"), scope => + tf_with(tf.name_scope("fc-2"), scope => { fc2_out = tf.layers.dense(fc1_out, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer); }); - 
with(tf.name_scope("fc-3"), scope => + tf_with(tf.name_scope("fc-3"), scope => { logits = tf.layers.dense(fc2_out, num_class, activation: null, kernel_initializer: fc_initializer); predictions = tf.argmax(logits, -1, output_type: tf.int32); }); // ============= Loss and Accuracy ============= - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var y_one_hot = tf.one_hot(y, num_class); loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot)); var update_ops = tf.get_collection(ops.GraphKeys.UPDATE_OPS) as List; - with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate + tf_with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate { var adam = tf.train.AdamOptimizer(learning_rate); adam.minimize(loss, global_step: global_step); @@ -129,13 +129,13 @@ namespace TensorFlowNET.Examples.Text private Tensor conv_block(Tensor input, int i, bool max_pool = true) { - return with(tf.variable_scope($"conv-block-{i}"), delegate + return tf_with(tf.variable_scope($"conv-block-{i}"), delegate { Tensor conv = null; // Two "conv-batch_norm-relu" layers. 
foreach (var j in Enumerable.Range(0, 2)) { - with(tf.variable_scope($"conv-{j}"), delegate + tf_with(tf.variable_scope($"conv-{j}"), delegate { // convolution conv = tf.layers.conv2d( diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs index b51f6719..3448ac83 100644 --- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs +++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs @@ -36,7 +36,7 @@ namespace TensorFlowNET.Examples.Text var keep_prob = tf.where(is_training, 0.5f, 1.0f); Tensor x_emb = null; - with(tf.name_scope("embedding"), scope => + tf_with(tf.name_scope("embedding"), scope => { var init_embeddings = tf.random_uniform(new int[] { vocabulary_size, embedding_size }); var embeddings = tf.get_variable("embeddings", initializer: init_embeddings); @@ -68,20 +68,20 @@ namespace TensorFlowNET.Examples.Text var h_pool = tf.concat(pooled_outputs, 3); var h_pool_flat = tf.reshape(h_pool, new TensorShape(-1, num_filters * filter_sizes.Rank)); Tensor h_drop = null; - with(tf.name_scope("dropout"), delegate + tf_with(tf.name_scope("dropout"), delegate { h_drop = tf.nn.dropout(h_pool_flat, keep_prob); }); Tensor logits = null; Tensor predictions = null; - with(tf.name_scope("output"), delegate + tf_with(tf.name_scope("output"), delegate { logits = tf.layers.dense(h_drop, num_class); predictions = tf.argmax(logits, -1, output_type: tf.int32); }); - with(tf.name_scope("loss"), delegate + tf_with(tf.name_scope("loss"), delegate { var sscel = tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: y); var loss = tf.reduce_mean(sscel); @@ -89,7 +89,7 @@ namespace TensorFlowNET.Examples.Text var optimizer = adam.minimize(loss, global_step: global_step); }); - with(tf.name_scope("accuracy"), delegate + tf_with(tf.name_scope("accuracy"), delegate { var correct_predictions = tf.equal(predictions, y); var accuracy = 
tf.reduce_mean(tf.cast(correct_predictions, TF_DataType.TF_FLOAT), name: "accuracy"); diff --git a/test/TensorFlowNET.Examples/Utility/DataSetMnist.cs b/test/TensorFlowNET.Examples/Utility/DataSetMnist.cs deleted file mode 100644 index 0017eba5..00000000 --- a/test/TensorFlowNET.Examples/Utility/DataSetMnist.cs +++ /dev/null @@ -1,95 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-******************************************************************************/ - -using NumSharp; -using Tensorflow; - -namespace TensorFlowNET.Examples.Utility -{ - public class DataSetMnist : IDataSet - { - public int num_examples { get; } - - public int epochs_completed { get; private set; } - public int index_in_epoch { get; private set; } - public NDArray data { get; private set; } - public NDArray labels { get; private set; } - - public DataSetMnist(NDArray images, NDArray labels, TF_DataType dtype, bool reshape) - { - num_examples = images.shape[0]; - images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]); - images.astype(dtype.as_numpy_datatype()); - images = np.multiply(images, 1.0f / 255.0f); - - labels.astype(dtype.as_numpy_datatype()); - - data = images; - this.labels = labels; - epochs_completed = 0; - index_in_epoch = 0; - } - - public (NDArray, NDArray) next_batch(int batch_size, bool fake_data = false, bool shuffle = true) - { - var start = index_in_epoch; - // Shuffle for the first epoch - if(epochs_completed == 0 && start == 0 && shuffle) - { - var perm0 = np.arange(num_examples); - np.random.shuffle(perm0); - data = data[perm0]; - labels = labels[perm0]; - } - - // Go to the next epoch - if (start + batch_size > num_examples) - { - // Finished epoch - epochs_completed += 1; - - // Get the rest examples in this epoch - var rest_num_examples = num_examples - start; - //var images_rest_part = _images[np.arange(start, _num_examples)]; - //var labels_rest_part = _labels[np.arange(start, _num_examples)]; - // Shuffle the data - if (shuffle) - { - var perm = np.arange(num_examples); - np.random.shuffle(perm); - data = data[perm]; - labels = labels[perm]; - } - - start = 0; - index_in_epoch = batch_size - rest_num_examples; - var end = index_in_epoch; - var images_new_part = data[np.arange(start, end)]; - var labels_new_part = labels[np.arange(start, end)]; - - /*return (np.concatenate(new float[][] { images_rest_part.Data(), 
images_new_part.Data() }, axis: 0), - np.concatenate(new float[][] { labels_rest_part.Data(), labels_new_part.Data() }, axis: 0));*/ - return (images_new_part, labels_new_part); - } - else - { - index_in_epoch += batch_size; - var end = index_in_epoch; - return (data[np.arange(start, end)], labels[np.arange(start, end)]); - } - } - } -} diff --git a/test/TensorFlowNET.Examples/Utility/Datasets.cs b/test/TensorFlowNET.Examples/Utility/Datasets.cs deleted file mode 100644 index 0c8c4e2d..00000000 --- a/test/TensorFlowNET.Examples/Utility/Datasets.cs +++ /dev/null @@ -1,46 +0,0 @@ -using NumSharp; - -namespace TensorFlowNET.Examples.Utility -{ - public class Datasets where T : IDataSet - { - private T _train; - public T train => _train; - - private T _validation; - public T validation => _validation; - - private T _test; - public T test => _test; - - public Datasets(T train, T validation, T test) - { - _train = train; - _validation = validation; - _test = test; - } - - public (NDArray, NDArray) Randomize(NDArray x, NDArray y) - { - var perm = np.random.permutation(y.shape[0]); - - np.random.shuffle(perm); - return (x[perm], y[perm]); - } - - /// - /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) - /// - /// - /// - /// - /// - /// - public (NDArray, NDArray) GetNextBatch(NDArray x, NDArray y, int start, int end) - { - var x_batch = x[$"{start}:{end}"]; - var y_batch = y[$"{start}:{end}"]; - return (x_batch, y_batch); - } - } -} diff --git a/test/TensorFlowNET.Examples/Utility/IDataSet.cs b/test/TensorFlowNET.Examples/Utility/IDataSet.cs deleted file mode 100644 index 31be57c1..00000000 --- a/test/TensorFlowNET.Examples/Utility/IDataSet.cs +++ /dev/null @@ -1,10 +0,0 @@ -using NumSharp; - -namespace TensorFlowNET.Examples.Utility -{ - public interface IDataSet - { - NDArray data { get; } - NDArray labels { get; } - } -} diff --git a/test/TensorFlowNET.Examples/Utility/MNIST.cs 
b/test/TensorFlowNET.Examples/Utility/MNIST.cs deleted file mode 100644 index 73d6fe2a..00000000 --- a/test/TensorFlowNET.Examples/Utility/MNIST.cs +++ /dev/null @@ -1,131 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using NumSharp; -using System; -using System.IO; -using Tensorflow; - -namespace TensorFlowNET.Examples.Utility -{ - public class MNIST - { - private const string DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"; - private const string TRAIN_IMAGES = "train-images-idx3-ubyte.gz"; - private const string TRAIN_LABELS = "train-labels-idx1-ubyte.gz"; - private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; - private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; - public static Datasets read_data_sets(string train_dir, - bool one_hot = false, - TF_DataType dtype = TF_DataType.TF_FLOAT, - bool reshape = true, - int validation_size = 5000, - int? train_size = null, - int? 
test_size = null, - string source_url = DEFAULT_SOURCE_URL) - { - if (train_size!=null && validation_size >= train_size) - throw new ArgumentException("Validation set should be smaller than training set"); - - Web.Download(source_url + TRAIN_IMAGES, train_dir, TRAIN_IMAGES); - Compress.ExtractGZip(Path.Join(train_dir, TRAIN_IMAGES), train_dir); - var train_images = extract_images(Path.Join(train_dir, TRAIN_IMAGES.Split('.')[0]), limit: train_size); - - Web.Download(source_url + TRAIN_LABELS, train_dir, TRAIN_LABELS); - Compress.ExtractGZip(Path.Join(train_dir, TRAIN_LABELS), train_dir); - var train_labels = extract_labels(Path.Join(train_dir, TRAIN_LABELS.Split('.')[0]), one_hot: one_hot, limit: train_size); - - Web.Download(source_url + TEST_IMAGES, train_dir, TEST_IMAGES); - Compress.ExtractGZip(Path.Join(train_dir, TEST_IMAGES), train_dir); - var test_images = extract_images(Path.Join(train_dir, TEST_IMAGES.Split('.')[0]), limit: test_size); - - Web.Download(source_url + TEST_LABELS, train_dir, TEST_LABELS); - Compress.ExtractGZip(Path.Join(train_dir, TEST_LABELS), train_dir); - var test_labels = extract_labels(Path.Join(train_dir, TEST_LABELS.Split('.')[0]), one_hot: one_hot, limit:test_size); - - int end = train_images.shape[0]; - var validation_images = train_images[np.arange(validation_size)]; - var validation_labels = train_labels[np.arange(validation_size)]; - train_images = train_images[np.arange(validation_size, end)]; - train_labels = train_labels[np.arange(validation_size, end)]; - - var train = new DataSetMnist(train_images, train_labels, dtype, reshape); - var validation = new DataSetMnist(validation_images, validation_labels, dtype, reshape); - var test = new DataSetMnist(test_images, test_labels, dtype, reshape); - - return new Datasets(train, validation, test); - } - - public static NDArray extract_images(string file, int? 
limit=null) - { - using (var bytestream = new FileStream(file, FileMode.Open)) - { - var magic = _read32(bytestream); - if (magic != 2051) - throw new ValueError($"Invalid magic number {magic} in MNIST image file: {file}"); - var num_images = _read32(bytestream); - num_images = limit == null ? num_images : Math.Min(num_images, (uint)limit); - var rows = _read32(bytestream); - var cols = _read32(bytestream); - var buf = new byte[rows * cols * num_images]; - bytestream.Read(buf, 0, buf.Length); - var data = np.frombuffer(buf, np.uint8); - data = data.reshape((int)num_images, (int)rows, (int)cols, 1); - return data; - } - } - - public static NDArray extract_labels(string file, bool one_hot = false, int num_classes = 10, int? limit = null) - { - using (var bytestream = new FileStream(file, FileMode.Open)) - { - var magic = _read32(bytestream); - if (magic != 2049) - throw new ValueError($"Invalid magic number {magic} in MNIST label file: {file}"); - var num_items = _read32(bytestream); - num_items = limit == null ? 
num_items : Math.Min(num_items,(uint) limit); - var buf = new byte[num_items]; - bytestream.Read(buf, 0, buf.Length); - var labels = np.frombuffer(buf, np.uint8); - if (one_hot) - return dense_to_one_hot(labels, num_classes); - return labels; - } - } - - private static NDArray dense_to_one_hot(NDArray labels_dense, int num_classes) - { - var num_labels = labels_dense.shape[0]; - var index_offset = np.arange(num_labels) * num_classes; - var labels_one_hot = np.zeros(num_labels, num_classes); - - for(int row = 0; row < num_labels; row++) - { - var col = labels_dense.Data(row); - labels_one_hot.SetData(1.0, row, col); - } - - return labels_one_hot; - } - - private static uint _read32(FileStream bytestream) - { - var buffer = new byte[sizeof(uint)]; - var count = bytestream.Read(buffer, 0, 4); - return np.frombuffer(buffer, ">u4").Data(0); - } - } -} diff --git a/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs b/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs index 811c58e5..e953eb8e 100644 --- a/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs +++ b/test/TensorFlowNET.UnitTest/Basics/NegativeTests.cs @@ -12,13 +12,13 @@ namespace TensorFlowNET.UnitTest.Basics { var x = tf.constant(new[,] { { 1, 2 } }); var neg_x = tf.negative(x); - with(tf.Session(), session => + using (var sess = tf.Session()) { - var result = session.run(neg_x); + var result = sess.run(neg_x); Assert.AreEqual(result[0][0], -1); Assert.AreEqual(result[0][1], -2); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/ConstantTest.cs b/test/TensorFlowNET.UnitTest/ConstantTest.cs index 752d6d50..221d6196 100644 --- a/test/TensorFlowNET.UnitTest/ConstantTest.cs +++ b/test/TensorFlowNET.UnitTest/ConstantTest.cs @@ -1,6 +1,8 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using NumSharp; +using System; using System.Linq; +using System.Runtime.InteropServices; using Tensorflow; using static Tensorflow.Python; @@ -92,18 +94,18 @@ namespace TensorFlowNET.UnitTest { // small size var 
tensor = tf.zeros(new Shape(3, 2), TF_DataType.TF_INT32, "small"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 0, 0, 0, 0, 0, 0 }, result.Data())); - }); + } // big size tensor = tf.zeros(new Shape(200, 100), TF_DataType.TF_INT32, "big"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); @@ -114,21 +116,21 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(0, data[0]); Assert.AreEqual(0, data[500]); Assert.AreEqual(0, data[result.size - 1]); - }); + } } [TestMethod] public void OnesConst() { var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(ones); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(new[] { 1, 1, 1, 1, 1, 1 }.SequenceEqual(result.Data())); - }); + } } [TestMethod] @@ -136,14 +138,14 @@ namespace TensorFlowNET.UnitTest { var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); var halfes = ones * 0.5; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(halfes); Assert.AreEqual(result.shape[0], 3); Assert.AreEqual(result.shape[1], 2); Assert.IsTrue(new[] { .5, .5, .5, .5, .5, .5 }.SequenceEqual(result.Data())); - }); + } } [TestMethod] @@ -156,7 +158,7 @@ namespace TensorFlowNET.UnitTest }); var tensor = tf.constant(nd); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(tensor); var data = result.Data(); @@ -164,7 +166,7 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(result.shape[0], 2); Assert.AreEqual(result.shape[1], 3); Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 1, 1, 2, 1, 3 }, data)); - }); + } } [TestMethod] @@ -184,9 +186,9 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void 
StringEncode() { - /*string str = "Hello, TensorFlow.NET!"; + string str = "Hello, TensorFlow.NET!"; var handle = Marshal.StringToHGlobalAnsi(str); - ulong dst_len = c_api.TF_StringEncodedSize((UIntPtr)str.Length); + ulong dst_len = (ulong)c_api.TF_StringEncodedSize((UIntPtr)str.Length); Assert.AreEqual(dst_len, (ulong)23); IntPtr dst = Marshal.AllocHGlobal((int)dst_len); ulong encoded_len = c_api.TF_StringEncode(handle, (ulong)str.Length, dst, dst_len, status); @@ -194,7 +196,7 @@ namespace TensorFlowNET.UnitTest Assert.AreEqual(status.Code, TF_Code.TF_OK); string encoded_str = Marshal.PtrToStringUTF8(dst + sizeof(byte)); Assert.AreEqual(encoded_str, str); - Assert.AreEqual(str.Length, Marshal.ReadByte(dst));*/ + Assert.AreEqual(str.Length, Marshal.ReadByte(dst)); //c_api.TF_StringDecode(dst, (ulong)str.Length, IntPtr.Zero, ref dst_len, status); } diff --git a/test/TensorFlowNET.UnitTest/GradientTest.cs b/test/TensorFlowNET.UnitTest/GradientTest.cs index 8497be1f..372715dc 100644 --- a/test/TensorFlowNET.UnitTest/GradientTest.cs +++ b/test/TensorFlowNET.UnitTest/GradientTest.cs @@ -33,7 +33,8 @@ namespace TensorFlowNET.UnitTest public void Gradient2x() { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => { + using (var sess = tf.Session(graph)) + { var x = tf.constant(7.0f); var y = x * x * tf.constant(0.1f); @@ -42,14 +43,14 @@ namespace TensorFlowNET.UnitTest float r = sess.run(grad[0]); Assert.AreEqual(r, 1.4f); - }); + } } [TestMethod] public void Gradient3x() { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => { + tf_with(tf.Session(graph), sess => { var x = tf.constant(7.0f); var y = x * x * x * tf.constant(0.1f); @@ -91,14 +92,14 @@ namespace TensorFlowNET.UnitTest var g = tf.gradients(y, new Tensor[] { slice, slice }); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var r = sess.run(slice); Assert.IsTrue(Enumerable.SequenceEqual(r.shape, new[] { 2, 1, 2 })); 
Assert.IsTrue(Enumerable.SequenceEqual(r[0].GetData(), new[] { 11, 13 })); Assert.IsTrue(Enumerable.SequenceEqual(r[1].GetData(), new[] { 51, 53 })); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/GraphTest.cs b/test/TensorFlowNET.UnitTest/GraphTest.cs index e50e19f8..31abd561 100644 --- a/test/TensorFlowNET.UnitTest/GraphTest.cs +++ b/test/TensorFlowNET.UnitTest/GraphTest.cs @@ -419,7 +419,7 @@ namespace TensorFlowNET.UnitTest public void ImportGraphMeta() { var dir = "my-save-dir/"; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var new_saver = tf.train.import_meta_graph(dir + "my-model-10000.meta"); new_saver.restore(sess, dir + "my-model-10000"); @@ -428,7 +428,7 @@ namespace TensorFlowNET.UnitTest var logits = (tf.get_collection("logits") as List)[0] as Tensor; var loss = tf.losses.sparse_softmax_cross_entropy(labels: labels, logits: logits); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/NameScopeTest.cs b/test/TensorFlowNET.UnitTest/NameScopeTest.cs index 8d364c8b..39401fe0 100644 --- a/test/TensorFlowNET.UnitTest/NameScopeTest.cs +++ b/test/TensorFlowNET.UnitTest/NameScopeTest.cs @@ -13,7 +13,7 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void NestedNameScope() { - with(new ops.NameScope("scope1"), scope1 => + tf_with(new ops.NameScope("scope1"), scope1 => { name = scope1; Assert.AreEqual("scope1", g._name_stack); @@ -22,7 +22,7 @@ namespace TensorFlowNET.UnitTest var const1 = tf.constant(1.0); Assert.AreEqual("scope1/Const:0", const1.name); - with(new ops.NameScope("scope2"), scope2 => + tf_with(new ops.NameScope("scope2"), scope2 => { name = scope2; Assert.AreEqual("scope1/scope2", g._name_stack); diff --git a/test/TensorFlowNET.UnitTest/OperationsTest.cs b/test/TensorFlowNET.UnitTest/OperationsTest.cs index 37f8d450..10046f0c 100644 --- a/test/TensorFlowNET.UnitTest/OperationsTest.cs +++ b/test/TensorFlowNET.UnitTest/OperationsTest.cs @@ -130,6 +130,22 @@ namespace TensorFlowNET.UnitTest } } + [TestMethod] 
+ public void logicalAndTest() + { + var a = tf.constant(new[] {1f, 2f, 3f, 4f, -4f, -3f, -2f, -1f}); + var b = tf.less(a, 0f); + var c = tf.greater(a, 0f); + var d = tf.cast(tf.logical_and(b, c), tf.int32); + var check = np.array(new[] { 0, 0, 0, 0, 0, 0, 0, 0 }); + + using (var sess = tf.Session()) + { + var o = sess.run(d); + Assert.IsTrue(o.array_equal(check)); + } + } + [TestMethod] public void addOpTests() { @@ -467,5 +483,987 @@ namespace TensorFlowNET.UnitTest } #endregion } + + private IEnumerable MultiplyArray(IReadOnlyCollection first, IReadOnlyCollection second) + { + if(first.Count != second.Count) + throw new ArgumentException("Arrays should be of equal size!"); + + var firstEnumerator = first.GetEnumerator(); + var secondEnumerator = second.GetEnumerator(); + var result = new List(); + while (firstEnumerator.MoveNext()) + { + secondEnumerator.MoveNext(); + result.Add(firstEnumerator.Current * secondEnumerator.Current); + } + + firstEnumerator.Dispose(); + secondEnumerator.Dispose(); + + return result; + } + private IEnumerable MultiplyArray(IReadOnlyCollection first, IReadOnlyCollection second) + { + if(first.Count != second.Count) + throw new ArgumentException("Arrays should be of equal size!"); + + var firstEnumerator = first.GetEnumerator(); + var secondEnumerator = second.GetEnumerator(); + var result = new List(); + while (firstEnumerator.MoveNext()) + { + secondEnumerator.MoveNext(); + result.Add(firstEnumerator.Current * secondEnumerator.Current); + } + + firstEnumerator.Dispose(); + secondEnumerator.Dispose(); + + return result; + } + private IEnumerable MultiplyArray(IReadOnlyCollection first, IReadOnlyCollection second) + { + if(first.Count != second.Count) + throw new ArgumentException("Arrays should be of equal size!"); + + var firstEnumerator = first.GetEnumerator(); + var secondEnumerator = second.GetEnumerator(); + var result = new List(); + while (firstEnumerator.MoveNext()) + { + secondEnumerator.MoveNext(); + 
result.Add(firstEnumerator.Current * secondEnumerator.Current); + } + + firstEnumerator.Dispose(); + secondEnumerator.Dispose(); + + return result; + } + + [TestMethod] + public void mulOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const int cols = 10; + + #region intTest + const int firstIntVal = 2; + const int secondIntVal = 3; + + var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); + var intResult = MultiplyArray(firstIntFeed, secondIntFeed).Sum(); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator *(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator *(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(a * secondIntVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator *(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstIntVal * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + #endregion + + #region floatTest + const float firstFloatVal = 2.0f; + const float 
secondFloatVal = 3.0f; + + var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray(); + var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); + var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed).Sum(); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + + // Testing `operator *(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + + // Testing `operator *(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(a * secondFloatVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + + // Testing `operator *(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstFloatVal * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + #endregion + + #region doubleTest + const double firstDoubleVal = 2.0; + const double secondDoubleVal = 3.0; + + var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); + var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed).Sum(); + + a = 
tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator *(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator *(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(a * secondFloatVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator *(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstFloatVal * b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double) o, doubleResult); + } + #endregion + } + + [TestMethod] + public void divOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const int cols = 10; + + #region intTest + const int firstIntVal = 6; + const int secondIntVal = 3; + + var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); + var intResult = (int)(firstIntFeed.Sum() / (float)secondIntVal); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = 
tf.reduce_sum(tf.reduce_sum(gen_math_ops.floor_div(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator /(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator /(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(a / secondIntVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator /(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstIntVal / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + #endregion + + #region floatTest + const float firstFloatVal = 6.0f; + const float secondFloatVal = 3.0f; + + var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray(); + var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); + var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1/x).ToArray()).Sum(); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, 
floatResult); + } + + // Testing `operator /(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + + // Testing `operator /(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(a / secondFloatVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + + // Testing `operator /(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstFloatVal / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((float)o, floatResult); + } + #endregion + + #region doubleTest + const double firstDoubleVal = 6.0; + const double secondDoubleVal = 3.0; + + var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); + var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1/x).ToArray()).Sum(); + + a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator /(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new 
FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator /(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(a / secondDoubleVal, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + + // Testing `operator /(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(firstDoubleVal / b, 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((double)o, doubleResult); + } + #endregion + } + + [TestMethod] + public void greaterThanOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const int cols = 10; + + #region intTest + const int intThreshold = 10; + + var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); + var intResult = firstIntFeed.Count(elem => elem > intThreshold); + var intResultTwo = firstIntFeed.Count(elem => elem < intThreshold); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator >(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + 
+ // Testing `operator >(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > intThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator >(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold > a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResultTwo); + } + #endregion + + #region floatTest + const float floatThreshold = 10.0f; + + var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); + var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); + var floatResult = firstFloatFeed.Count(elem => elem > floatThreshold); + var floatResultTwo = firstFloatFeed.Count(elem => elem < floatThreshold); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > floatThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + 
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold > a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResultTwo); + } + #endregion + + #region doubleTest + const double doubleThreshold = 10.0; + + var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); + var doubleResult = firstDoubleFeed.Count(elem => elem > doubleThreshold); + var doubleResultTwo = firstDoubleFeed.Count(elem => elem < doubleThreshold); + + a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator >(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator >(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > doubleThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator 
>(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold > a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResultTwo); + } + #endregion + } + + [TestMethod] + public void lessThanOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const int cols = 10; + + #region intTest + const int intThreshold = 10; + + var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); + var intResult = firstIntFeed.Count(elem => elem < intThreshold); + var intResultTwo = firstIntFeed.Count(elem => elem > intThreshold); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < intThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold < a, tf.int32), 1)); + 
using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResultTwo); + } + #endregion + + #region floatTest + const float floatThreshold = 10.0f; + + var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); + var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); + var floatResult = firstFloatFeed.Count(elem => elem < floatThreshold); + var floatResultTwo = firstFloatFeed.Count(elem => elem > floatThreshold); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < floatThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold < a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResultTwo); + 
} + #endregion + + #region doubleTest + const double doubleThreshold = 10.0; + + var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); + var doubleResult = firstDoubleFeed.Count(elem => elem < doubleThreshold); + var doubleResultTwo = firstDoubleFeed.Count(elem => elem > doubleThreshold); + + a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < doubleThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold < a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResultTwo); + } + #endregion + } + + [TestMethod] + public void greaterOrEqualThanOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const 
int cols = 10; + + #region intTest + const int intThreshold = 10; + + var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); + var intResult = firstIntFeed.Count(elem => elem >= intThreshold); + var intResultTwo = firstIntFeed.Count(elem => elem <= intThreshold); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator >=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator >=(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= intThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator >=(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold >= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResultTwo); + } + #endregion + + #region floatTest + const float floatThreshold = 10.0f; + + var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); + var secondFloatFeed = 
Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); + var floatResult = firstFloatFeed.Count(elem => elem >= floatThreshold); + var floatResultTwo = firstFloatFeed.Count(elem => elem <= floatThreshold); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >=(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= floatThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator >=(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold >= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResultTwo); + } + #endregion + + #region doubleTest + const double doubleThreshold = 10.0; + + var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); + var doubleResult = firstDoubleFeed.Count(elem => elem >= doubleThreshold); + var 
doubleResultTwo = firstDoubleFeed.Count(elem => elem <= doubleThreshold); + + a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator >=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator >=(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= doubleThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator >=(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold >= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResultTwo); + } + #endregion + } + + [TestMethod] + public void lessOrEqualThanOpTests() + { + const int rows = 2; // to avoid broadcasting effect + const int cols = 10; + + #region intTest + const int intThreshold = 10; + + var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); + var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); + var intResult = firstIntFeed.Count(elem => elem <= intThreshold); + var intResultTwo = firstIntFeed.Count(elem => 
elem >= intThreshold); + + var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <=(Tensor x, int y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= intThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResult); + } + + // Testing `operator <=(int x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold <= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, intResultTwo); + } + #endregion + + #region floatTest + const float floatThreshold = 10.0f; + + var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); + var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); + var floatResult = firstFloatFeed.Count(elem => elem <= floatThreshold); + var floatResultTwo = firstFloatFeed.Count(elem => elem >= floatThreshold); + + a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + c = 
tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <=(Tensor x, float y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= floatThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResult); + } + + // Testing `operator <=(float x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold <= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, floatResultTwo); + } + #endregion + + #region doubleTest + const double doubleThreshold = 10.0; + + var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray(); + var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); + var doubleResult = firstDoubleFeed.Count(elem => elem <= doubleThreshold); + var doubleResultTwo = firstDoubleFeed.Count(elem => elem >= doubleThreshold); + + a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); + + using (var sess = tf.Session()) + { + var o = sess.run(c, + new 
FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <=(Tensor x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), + new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <=(Tensor x, double y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= doubleThreshold, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResult); + } + + // Testing `operator <=(double x, Tensor y) + c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold <= a, tf.int32), 1)); + using (var sess = tf.Session()) + { + var o = sess.run(c, + new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); + Assert.AreEqual((int)o, doubleResultTwo); + } + #endregion + } } } diff --git a/test/TensorFlowNET.UnitTest/PlaceholderTest.cs b/test/TensorFlowNET.UnitTest/PlaceholderTest.cs index c5a575c1..a8cec568 100644 --- a/test/TensorFlowNET.UnitTest/PlaceholderTest.cs +++ b/test/TensorFlowNET.UnitTest/PlaceholderTest.cs @@ -13,12 +13,12 @@ namespace TensorFlowNET.UnitTest var x = tf.placeholder(tf.int32); var y = x * 3; - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(y, new FeedItem(x, 2)); Assert.AreEqual((int)result, 6); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index 6ea4fd19..d51ccc62 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -164,7 +164,7 @@ namespace TensorFlowNET.UnitTest // return 
self._eval_helper(tensors) // else: { - with(tf.Session(), s => + using (var sess = tf.Session()) { var ndarray=tensor.eval(); if (typeof(T) == typeof(double)) @@ -181,7 +181,8 @@ namespace TensorFlowNET.UnitTest { result = ndarray; } - }); + } + return (T)result; } } diff --git a/test/TensorFlowNET.UnitTest/SessionTest.cs b/test/TensorFlowNET.UnitTest/SessionTest.cs index 72e5e24a..51620b19 100644 --- a/test/TensorFlowNET.UnitTest/SessionTest.cs +++ b/test/TensorFlowNET.UnitTest/SessionTest.cs @@ -82,11 +82,11 @@ namespace TensorFlowNET.UnitTest var a = constant_op.constant(np.array(3.0).reshape(1, 1)); var b = constant_op.constant(np.array(2.0).reshape(1, 1)); var c = math_ops.matmul(a, b, name: "matmul"); - with(tf.Session(), delegate + using (var sess = tf.Session()) { var result = c.eval(); Assert.AreEqual(6, result.Data()[0]); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/TensorTest.cs b/test/TensorFlowNET.UnitTest/TensorTest.cs index 9f4fff39..6666443c 100644 --- a/test/TensorFlowNET.UnitTest/TensorTest.cs +++ b/test/TensorFlowNET.UnitTest/TensorTest.cs @@ -12,7 +12,7 @@ namespace TensorFlowNET.UnitTest [TestClass] public class TensorTest : CApiTest { - [TestMethod] + [Ignore("Not for mult-thread")] public void TensorDeallocationThreadSafety() { var tensors = new Tensor[1000]; @@ -77,14 +77,14 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void AllocateTensor() { - /*ulong num_bytes = 6 * sizeof(float); + ulong num_bytes = 6 * sizeof(float); long[] dims = { 2, 3 }; Tensor t = c_api.TF_AllocateTensor(TF_DataType.TF_FLOAT, dims, 2, num_bytes); EXPECT_EQ(TF_DataType.TF_FLOAT, t.dtype); EXPECT_EQ(2, t.NDims); - Assert.IsTrue(Enumerable.SequenceEqual(dims, t.shape)); + EXPECT_EQ((int)dims[0], t.shape[0]); EXPECT_EQ(num_bytes, t.bytesize); - t.Dispose();*/ + t.Dispose(); } diff --git a/test/TensorFlowNET.UnitTest/TrainSaverTest.cs b/test/TensorFlowNET.UnitTest/TrainSaverTest.cs index c437c862..625ca874 100644 --- 
a/test/TensorFlowNET.UnitTest/TrainSaverTest.cs +++ b/test/TensorFlowNET.UnitTest/TrainSaverTest.cs @@ -17,10 +17,10 @@ namespace TensorFlowNET.UnitTest public void ImportGraph() { - with(tf.Session(), sess => + using (var sess = tf.Session()) { var new_saver = tf.train.import_meta_graph("C:/tmp/my-model.meta"); - }); + } //tf.train.export_meta_graph(filename: "linear_regression.meta.bin"); // import meta @@ -42,7 +42,7 @@ namespace TensorFlowNET.UnitTest public void ImportSavedModel() { - with(Session.LoadFromSavedModel("mobilenet"), sess => + tf_with(Session.LoadFromSavedModel("mobilenet"), sess => { }); @@ -63,14 +63,14 @@ namespace TensorFlowNET.UnitTest // Add ops to save and restore all the variables. var saver = tf.train.Saver(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // Save the variables to disk. var save_path = saver.save(sess, "/tmp/model1.ckpt"); Console.WriteLine($"Model saved in path: {save_path}"); - }); + } } public void Save2() @@ -87,7 +87,7 @@ namespace TensorFlowNET.UnitTest // Add ops to save and restore all the variables. var saver = tf.train.Saver(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // o some work with the model. @@ -97,7 +97,7 @@ namespace TensorFlowNET.UnitTest // Save the variables to disk. 
var save_path = saver.save(sess, "/tmp/model2.ckpt"); Console.WriteLine($"Model saved in path: {save_path}"); - }); + } } } } diff --git a/test/TensorFlowNET.UnitTest/VariableTest.cs b/test/TensorFlowNET.UnitTest/VariableTest.cs index a353bcc1..e170bcab 100644 --- a/test/TensorFlowNET.UnitTest/VariableTest.cs +++ b/test/TensorFlowNET.UnitTest/VariableTest.cs @@ -35,9 +35,9 @@ namespace TensorFlowNET.UnitTest public void VarCreation() { tf.Graph().as_default(); - with(tf.variable_scope("foo"), delegate + tf_with(tf.variable_scope("foo"), delegate { - with(tf.variable_scope("bar"), delegate + tf_with(tf.variable_scope("bar"), delegate { var v = tf.get_variable("v", new TensorShape(1)); Assert.AreEqual(v.name, "foo/bar/v:0"); @@ -53,14 +53,14 @@ namespace TensorFlowNET.UnitTest { tf.Graph().as_default(); variable_scope vs = null; - with(tf.variable_scope("foo"), v => vs = v); + tf_with(tf.variable_scope("foo"), v => vs = v); // Re-enter the variable scope. - with(tf.variable_scope(vs, auxiliary_name_scope: false), v => + tf_with(tf.variable_scope(vs, auxiliary_name_scope: false), v => { var vs1 = (VariableScope)v; // Restore the original name_scope. 
- with(tf.name_scope(vs1.original_name_scope), delegate + tf_with(tf.name_scope(vs1.original_name_scope), delegate { var v1 = tf.get_variable("v", new TensorShape(1)); Assert.AreEqual(v1.name, "foo/v:0"); @@ -89,21 +89,20 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void Assign1() { - with(tf.Graph().as_default(), graph => - { - var variable = tf.Variable(31, name: "tree"); - var init = tf.global_variables_initializer(); + var graph = tf.Graph().as_default(); - var sess = tf.Session(graph); - sess.run(init); + var variable = tf.Variable(31, name: "tree"); + var init = tf.global_variables_initializer(); - var result = sess.run(variable); - Assert.IsTrue((int)result == 31); + var sess = tf.Session(graph); + sess.run(init); - var assign = variable.assign(12); - result = sess.run(assign); - Assert.IsTrue((int)result == 12); - }); + var result = sess.run(variable); + Assert.IsTrue((int)result == 31); + + var assign = variable.assign(12); + result = sess.run(assign); + Assert.IsTrue((int)result == 12); } [TestMethod] @@ -115,12 +114,12 @@ namespace TensorFlowNET.UnitTest // Add an op to initialize the variables. var init_op = tf.global_variables_initializer(); - with(tf.Session(), sess => + using (var sess = tf.Session()) { sess.run(init_op); // o some work with the model. 
inc_v1.op.run(); - }); + } } /// @@ -129,6 +128,7 @@ namespace TensorFlowNET.UnitTest [TestMethod] public void Add() { + tf.Graph().as_default(); int result = 0; Tensor x = tf.Variable(10, name: "x"); diff --git a/test/TensorFlowNET.UnitTest/c_test_util.cs b/test/TensorFlowNET.UnitTest/c_test_util.cs index c75bc616..1b6909e7 100644 --- a/test/TensorFlowNET.UnitTest/c_test_util.cs +++ b/test/TensorFlowNET.UnitTest/c_test_util.cs @@ -37,14 +37,13 @@ namespace TensorFlowNET.UnitTest public static GraphDef GetGraphDef(Graph graph) { - var s = new Status(); - var buffer = new Buffer(); - c_api.TF_GraphToGraphDef(graph, buffer, s); - s.Check(); - var def = GraphDef.Parser.ParseFrom(buffer); - buffer.Dispose(); - s.Dispose(); - return def; + using (var s = new Status()) + using (var buffer = new Buffer()) + { + c_api.TF_GraphToGraphDef(graph, buffer, s); + s.Check(); + return GraphDef.Parser.ParseFrom(buffer); + } } public static bool IsAddN(NodeDef node_def, int n) diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs index a116c91a..94686049 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs @@ -16,7 +16,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var x = tf.constant(2, name: "x"); var y = tf.constant(5, name: "y"); @@ -27,7 +27,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test int result = z.eval(sess); assertEquals(result, 22); - }); + } } [Ignore("need tesnroflow expose AddControlInput API")] @@ -36,7 +36,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test { var graph = tf.Graph().as_default(); - with(tf.Session(graph), sess => + using (var sess = tf.Session(graph)) { var x = tf.constant(2, name: "x"); var y = tf.constant(1, 
name: "y"); @@ -47,7 +47,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test int result = z.eval(sess); assertEquals(result, 11); - }); + } } [Ignore("need tesnroflow expose AddControlInput API")] diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs index 47cb397a..682b826f 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs @@ -11,7 +11,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test private void _testWhileContextHelper(int? maximum_iterations = null) { // TODO: implement missing code dependencies - with(this.cached_session(), sess => + using (var sess = this.cached_session()) { var i = constant_op.constant(0, name: "i"); var c = new Func(x => gen_math_ops.less(x, 10, name: "c")); @@ -26,7 +26,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test WhileContext.from_proto( control_flow_context.to_proto()).to_proto(), "");*/ } - }); + } } [Ignore("TODO")] diff --git a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs index d1249cf0..68ee14e4 100644 --- a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs +++ b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs @@ -15,20 +15,18 @@ namespace TensorFlowNET.UnitTest.gradients_test [TestMethod] public void testGradients() { - with(tf.Graph().as_default(), g => - { - var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); - var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); - var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); - var xw = math_ops.matmul(inp, w, name: "xw"); - var h = nn_ops.bias_add(xw, b, name: "h"); - var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; - self.assertEquals("MatMul", w_grad.op.type); - // TODO: 
Operation._original_op - //self.assertEquals(w_grad.op._original_op, xw.op); - self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); - self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); - }); + var g = tf.Graph().as_default(); + var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); + var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); + var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); + var xw = math_ops.matmul(inp, w, name: "xw"); + var h = nn_ops.bias_add(xw, b, name: "h"); + var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; + self.assertEquals("MatMul", w_grad.op.type); + // TODO: Operation._original_op + //self.assertEquals(w_grad.op._original_op, xw.op); + self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); + self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); } [TestMethod] @@ -104,14 +102,14 @@ namespace TensorFlowNET.UnitTest.gradients_test tf.constant(new[] { 1 }, tf.int32, new[] { 1 }) ); var g = tf.gradients(b, a); - with(tf.Session(), sess => + using (var sess = tf.Session()) { var result = sess.run(new object[] { g, b }); var actualDeriv = np.squeeze(result[0]); var actual = np.squeeze(result[1]); self.assertEquals(new float[] { 1, 0 }, new float[] { actualDeriv[0], actualDeriv[1] }); self.assertEquals(0.9640276f, (float)actual); - }); + } } [TestMethod] diff --git a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs index 33a16ba8..47baeeb5 100644 --- a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs +++ b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs @@ -73,12 +73,13 @@ namespace TensorFlowNET.UnitTest.nn_test { var value = array_ops.placeholder(dtype: dtypes.float32); var sparsity = nn_impl.zero_fraction(value); - with(self.cached_session(), sess => { + using (var sess = self.cached_session()) + { // TODO: make this compile - //self.assertAllClose( - // 0.25, - // sess.run(sparsity, {value: [[0., 
1.], [0.3, 2.]]})); - }); + //self.assertAllClose( + // 0.25, + // sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); + } } diff --git a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs index 21ee3f6d..8c8a89dd 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs @@ -16,19 +16,18 @@ namespace TensorFlowNET.UnitTest.ops_test [TestMethod] public void TestBasic() { - var graph = tf.Graph().as_default(); + var g = tf.Graph().as_default(); Tensor a = null, b = null, c = null, d = null, e = null; - with(graph, g => + + a = constant_op.constant(1.0); + b = constant_op.constant(1.0); + tf_with(g.control_dependencies(new[] { a }), x => { - a = constant_op.constant(1.0); - b = constant_op.constant(1.0); - with(g.control_dependencies(new[] { a }), x => - { - c = constant_op.constant(1.0); - d = array_ops.identity(b); - e = array_ops.identity(c); - }); + c = constant_op.constant(1.0); + d = array_ops.identity(b); + e = array_ops.identity(c); }); + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); // e should be dominated by c. @@ -56,7 +55,7 @@ namespace TensorFlowNET.UnitTest.ops_test // TODO: make this compile (see original Python code below) a = constant_op.constant(1.0); b = future; // <--- {henon} obviously, this doesn't compile, looks like control_dependencies needs to be able to take callables as well. 
- with(ops.control_dependencies(new object[] { a, b }), ctrl => + tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => { return c = constant_op.constant(3.0); }); @@ -64,19 +63,15 @@ namespace TensorFlowNET.UnitTest.ops_test } else { - var graph = tf.Graph().as_default(); - with(graph, g => + var g = tf.Graph().as_default(); + a = constant_op.constant(1.0); + var b1 = future(); + tf_with(g.control_dependencies(new[] { a, b }), ctrl => { - a = constant_op.constant(1.0); - var b1 = future(); - with(g.control_dependencies(new[] { a, b }), ctrl => - { - c = constant_op.constant(3.0); - }); - Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); - Assert.AreEqual(1, calls); + c = constant_op.constant(3.0); }); - + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); + Assert.AreEqual(1, calls); } } /* @@ -139,17 +134,17 @@ namespace TensorFlowNET.UnitTest.ops_test var a_3 = constant_op.constant(4.0); var a_4 = constant_op.constant(5.0); Tensor b_1 = null, b_2 = null; - with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => + tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => { b_1 = constant_op.constant(6.0); }); - with(g.control_dependencies(new[] { a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { - with(g.control_dependencies(new[] { a_3 }), ctrl3 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => { - with(g.control_dependencies(new[] { a_4 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => { b_2 = constant_op.constant(7.0); }); @@ -175,15 +170,15 @@ namespace TensorFlowNET.UnitTest.ops_test var a_3 = constant_op.constant(4.0); var a_4 = constant_op.constant(5.0); Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; - with(g.control_dependencies(new[] { 
a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { - with(g.control_dependencies(null), ctrl3 => + tf_with(g.control_dependencies(null), ctrl3 => { - with(g.control_dependencies(new[] { a_3 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => { - with(g.control_dependencies(new[] { a_4 }), ctrl5 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => { // deps [a_3, a_4] b_3_4 = constant_op.constant(7.0); @@ -199,7 +194,7 @@ namespace TensorFlowNET.UnitTest.ops_test }); // deps back to [a_1] b_1 = constant_op.constant(11.0); - with(g.control_dependencies(null), ctrl6 => + tf_with(g.control_dependencies(null), ctrl6 => { // deps are None again b_none2 = constant_op.constant(12.0); @@ -233,25 +228,25 @@ namespace TensorFlowNET.UnitTest.ops_test Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; - with(g.control_dependencies(new[] { a_1 }), ctrl1 => + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => { b_1 = tf.multiply(a_3, a_4); c_1 = tf.multiply(a_1, b_1.output); d_1 = tf.multiply(b_1.output, c_1.output); e_1 = constant_op.constant(5.0); - with(g.control_dependencies(new[] { a_2 }), ctrl2 => + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => { b_2 = tf.multiply(a_3, a_4); c_2 = tf.multiply(a_1, b_1.output); d_2 = tf.multiply(b_2.output, c_2.output); e_2 = tf.multiply(e_1.output, e_1.output); - with(g.control_dependencies(new[] { a_3 }), ctrl3 => + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => { b_3 = tf.multiply(a_3, a_4); c_3 = tf.multiply(a_1, b_1.output); d_3 = tf.multiply(b_3.output, c_3.output); e_3 = tf.multiply(e_2.output, e_2.output); - with(g.control_dependencies(new[] { a_4 }), ctrl4 => + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => { b_4 = 
tf.multiply(a_3, a_4); c_4 = tf.multiply(a_1, b_1.output); @@ -310,7 +305,7 @@ namespace TensorFlowNET.UnitTest.ops_test var g = tf.Graph().as_default(); Operation b = null; var a = constant_op.constant(100.0); - with(g.control_dependencies(new[] { a }), ctrl1 => + tf_with(g.control_dependencies(new[] { a }), ctrl1 => { b = array_ops.identity(a); }); diff --git a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs index a961d7ec..dcaeaf11 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs @@ -24,81 +24,73 @@ namespace TensorFlowNET.UnitTest.ops_test [TestMethod] public void TestShape() { - var graph = tf.Graph().as_default(); - with(graph, g => - { - var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); - var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); - var op = g._create_op_from_tf_operation(c_op); - - Assert.AreEqual("myop", op.name); - Assert.AreEqual("Identity", op.type); - Assert.AreEqual(1, len(op.outputs)); - assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); - }); + var g = tf.Graph().as_default(); + + var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); + var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); + var op = g._create_op_from_tf_operation(c_op); + + Assert.AreEqual("myop", op.name); + Assert.AreEqual("Identity", op.type); + Assert.AreEqual(1, len(op.outputs)); + assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); } [TestMethod] public void TestUniqueName() { var graph = tf.Graph().as_default(); - with(graph, g => - { - //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); - //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new 
Tensor[0], new Operation[0]); - //var op = g._create_op_from_tf_operation(c_op); - //var op2 = g._create_op_from_tf_operation(c_op2); - var op = constant_op.constant(0, name: "myop").op; - var op2 = constant_op.constant(0, name: "myop_1").op; - - // Create ops with same names as op1 and op2. We expect the new names to be - // uniquified. - var op3 = constant_op.constant(0, name: "myop").op; - var op4 = constant_op.constant(0, name: "myop_1").op; - - self.assertEqual(op.name, "myop"); - self.assertEqual(op2.name, "myop_1"); - self.assertEqual(op3.name, "myop_2"); - self.assertEqual(op4.name, "myop_1_1"); - }); + //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); + //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); + //var op = g._create_op_from_tf_operation(c_op); + //var op2 = g._create_op_from_tf_operation(c_op2); + var op = constant_op.constant(0, name: "myop").op; + var op2 = constant_op.constant(0, name: "myop_1").op; + + // Create ops with same names as op1 and op2. We expect the new names to be + // uniquified. 
+ var op3 = constant_op.constant(0, name: "myop").op; + var op4 = constant_op.constant(0, name: "myop_1").op; + + self.assertEqual(op.name, "myop"); + self.assertEqual(op2.name, "myop_1"); + self.assertEqual(op3.name, "myop_2"); + self.assertEqual(op4.name, "myop_1_1"); } [Ignore("need tesnroflow expose UpdateEdge API")] [TestMethod] public void TestCond() { - var graph = tf.Graph().as_default(); - with(graph, g => + var g = tf.Graph().as_default(); + var x = constant_op.constant(10); + + var true_fn = new Func(() => { - var x = constant_op.constant(10); - - var true_fn = new Func(() => - { - var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); - var new_ops = g._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return x; - }); - - control_flow_ops.cond(x < 10, true_fn, () => x); - - var op = g.get_operation_by_name("cond/myop"); - - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); - - self.assertIsNotNone(op); - self.assertEqual(op.name, "cond/myop"); - self.assertEqual(op.type, "Identity"); - //self.assertEqual(op.outputs, new object[0]); - var op_input = op.inputs[0].op; - self.assertEqual(op_input.type, "Switch"); - self.assertEqual(op_input.inputs[0].name, x.name); - self.assertEqual(op.graph, g); - self.assertIsNotNone(op._get_control_flow_context()); - var cond_text = op._get_control_flow_context() as ControlFlowContext; - self.assertEqual(cond_text.name, "cond/cond_text"); + var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); + var new_ops = g._add_new_tf_operations(); + self.assertEqual(len(new_ops), 1); + return x; }); + + control_flow_ops.cond(x < 10, true_fn, () => x); + + var op = g.get_operation_by_name("cond/myop"); + + 
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); + //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); + + self.assertIsNotNone(op); + self.assertEqual(op.name, "cond/myop"); + self.assertEqual(op.type, "Identity"); + //self.assertEqual(op.outputs, new object[0]); + var op_input = op.inputs[0].op; + self.assertEqual(op_input.type, "Switch"); + self.assertEqual(op_input.inputs[0].name, x.name); + self.assertEqual(op.graph, g); + self.assertIsNotNone(op._get_control_flow_context()); + var cond_text = op._get_control_flow_context() as ControlFlowContext; + self.assertEqual(cond_text.name, "cond/cond_text"); } [Ignore("Todo: Port")] @@ -107,20 +99,17 @@ namespace TensorFlowNET.UnitTest.ops_test { var graph = tf.Graph().as_default(); Operation x=null; - with(graph, g => + x = constant_op.constant(42); + var body = new Func(i => { - x = constant_op.constant(42); - var body = new Func(i => - { - ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, - new Operation[0]); - var new_ops = g._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return i; - }); - // TODO: port control_flow_ops.while_loop - //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); + ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, + new Operation[0]); + var new_ops = graph._add_new_tf_operations(); + self.assertEqual(len(new_ops), 1); + return i; }); + // TODO: port control_flow_ops.while_loop + //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); var op = graph.get_operation_by_name("myloop/myop"); self.assertIsNotNone(op); self.assertEqual(op.name, "myloop/myop");