diff --git a/.gitignore b/.gitignore
index eee1dc7b..ce600fbb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -332,3 +332,7 @@ src/TensorFlowNET.Native/bazel-*
src/TensorFlowNET.Native/c_api.h
/.vscode
test/TensorFlowNET.Examples/mnist
+
+
+# training model resources
+.resources
diff --git a/README.md b/README.md
index 7f7d14a4..9cf23da2 100644
--- a/README.md
+++ b/README.md
@@ -28,8 +28,14 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr
Install TF.NET and TensorFlow binary through NuGet.
```sh
+### Install TensorFlow C# binding
PM> Install-Package TensorFlow.NET
+
+### Install tensorflow binary
+### For CPU version
PM> Install-Package SciSharp.TensorFlow.Redist
+### For GPU version (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
```
Import TF.NET.
diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 5d6e5fe7..689965c4 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -17,7 +17,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}"
EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowDatasets", "src\TensorFlowDatasets\TensorFlowDatasets.csproj", "{DF151A51-E9FD-41BD-B0F4-08A743755D44}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples.GPU", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.GPU.csproj", "{6F6B3382-8F87-4CD9-BF87-C81D5405685A}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -57,6 +61,14 @@ Global
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU
+ {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/src/KerasNET.Core/Model.cs b/src/KerasNET.Core/Model.cs
index b1e6de57..d1d05159 100644
--- a/src/KerasNET.Core/Model.cs
+++ b/src/KerasNET.Core/Model.cs
@@ -115,7 +115,7 @@ namespace Keras
var init = tf.global_variables_initializer();
float loss_value = 0;
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -133,7 +133,7 @@ namespace Keras
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
diff --git a/src/SciSharp.TensorFlow.Redist/README.md b/src/SciSharp.TensorFlow.Redist/README.md
index 3f75c4cf..5bdf82a1 100644
--- a/src/SciSharp.TensorFlow.Redist/README.md
+++ b/src/SciSharp.TensorFlow.Redist/README.md
@@ -1,8 +1,14 @@
## SciSharp.TensorFlow.Redist ##
-`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.4.0 going forward.
+`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). The [ML.NET](https://github.com/dotnet/machinelearning) team will no longer maintain the package from [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 onward.
+* GPU version for Windows
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+```
+
+* CPU version for all platforms (Windows, Linux, OSX)
```powershell
PM> Install-Package SciSharp.TensorFlow.Redist
```
@@ -16,7 +22,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5
On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries.
1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux.
-2. Run `nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json`
+2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json`
diff --git a/src/SciSharp.TensorFlow.Redist/Redist.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec
similarity index 89%
rename from src/SciSharp.TensorFlow.Redist/Redist.nuspec
rename to src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec
index d2527c8b..11919e8c 100644
--- a/src/SciSharp.TensorFlow.Redist/Redist.nuspec
+++ b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec
@@ -9,7 +9,7 @@
LICENSE.txt
https://aka.ms/deprecateLicenseUrl
https://www.tensorflow.org/
- $packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package.
+ $packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package.
https://github.com/tensorflow/tensorflow/releases/tag/v$version$
Copyright 2019 The TensorFlow Authors. All rights reserved.
TensorFlow
diff --git a/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec
new file mode 100644
index 00000000..f010c96b
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec
@@ -0,0 +1,26 @@
+
+
+
+ $packageId$
+ $version$
+ The TensorFlow Authors
+ The TensorFlow Authors
+ true
+ LICENSE.txt
+ https://aka.ms/deprecateLicenseUrl
+ https://www.tensorflow.org/
+ $packageId$ contains the TensorFlow C library GPU version $version$ redistributed as a NuGet package.
+ https://github.com/tensorflow/tensorflow/releases/tag/v$version$
+ Copyright 2019 The TensorFlow Authors. All rights reserved.
+ TensorFlow
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj
similarity index 99%
rename from src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
rename to src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj
index a0ca0a0a..6a225ede 100644
--- a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-CPU.nupkgproj
@@ -17,7 +17,7 @@
true
false
- Redist.nuspec
+ Redist-CPU.nuspec
packageId=$(PackageId);version=$(PackageVersion)
$(ProjDir)
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj
new file mode 100644
index 00000000..08fd9386
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj
@@ -0,0 +1,187 @@
+
+
+
+ $(MSBuildThisFileDirectory)
+ $(ProjDir)bin\
+ $(ProjDir)obj\
+
+ x64
+ netstandard2.0
+ 1.14.0
+ 1
+
+ $(BinDir)packages\
+ $(MSBuildProjectName)
+ $(TensorFlowVersion)
+
+ true
+ false
+
+ Redist-Windows-GPU.nuspec
+ packageId=$(PackageId);version=$(PackageVersion)
+ $(ProjDir)
+
+ CopyFilesFromArchive
+
+ win
+ linux
+ osx
+ $(PackageRid)-$(TargetArchitecture)
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @(FilesWithHashes->'%(FileHash)')
+ $([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+ <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+ <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+
+
+ <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha b/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha
new file mode 100644
index 00000000..739129b1
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/libtensorflow-gpu-windows-x86_64-1.14.0.zip.sha
@@ -0,0 +1 @@
+850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293
diff --git a/src/TensorFlowDatasets/DatasetBuilder.cs b/src/TensorFlowDatasets/DatasetBuilder.cs
new file mode 100644
index 00000000..bfb78d6e
--- /dev/null
+++ b/src/TensorFlowDatasets/DatasetBuilder.cs
@@ -0,0 +1,24 @@
+using System;
+
+namespace TensorFlowDatasets
+{
+ ///
+ /// Abstract base class for all datasets.
+ ///
+ public class DatasetBuilder
+ {
+ ///
+ /// Downloads and prepares dataset for reading.
+ ///
+ ///
+ /// directory where downloaded files are stored.
+ ///
+ ///
+ /// further configuration for downloading and preparing dataset.
+ ///
+ public void download_and_prepare(string download_dir = null, DownloadConfig download_config = null)
+ {
+
+ }
+ }
+}
diff --git a/src/TensorFlowDatasets/DownloadConfig.cs b/src/TensorFlowDatasets/DownloadConfig.cs
new file mode 100644
index 00000000..0488e273
--- /dev/null
+++ b/src/TensorFlowDatasets/DownloadConfig.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace TensorFlowDatasets
+{
+ public class DownloadConfig
+ {
+ }
+}
diff --git a/src/TensorFlowDatasets/TensorFlowDatasets.csproj b/src/TensorFlowDatasets/TensorFlowDatasets.csproj
new file mode 100644
index 00000000..1b839c1f
--- /dev/null
+++ b/src/TensorFlowDatasets/TensorFlowDatasets.csproj
@@ -0,0 +1,19 @@
+
+
+
+ netcoreapp2.2
+ SciSharp.TensorFlowDatasets
+ 0.0.1
+ SciSharp Team
+ TensorFlow Datasets
+ true
+ https://avatars3.githubusercontent.com/u/44989469?s=200&v=4
+ http://scisharpstack.org
+ TensorFlow Datasets provides many public datasets as tf.data.Datasets.
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+ SciSharp, Dataset, TensorFlow
+ Apache 2.0
+
+
+
diff --git a/src/TensorFlowHub/MnistDataSet.cs b/src/TensorFlowHub/MnistDataSet.cs
index e0717ccb..accc57e1 100644
--- a/src/TensorFlowHub/MnistDataSet.cs
+++ b/src/TensorFlowHub/MnistDataSet.cs
@@ -27,5 +27,54 @@ namespace Tensorflow.Hub
labels.astype(dataType);
Labels = labels;
}
+
+ public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true)
+ {
+ var start = IndexInEpoch;
+ // Shuffle for the first epoch
+ if(EpochsCompleted == 0 && start == 0 && shuffle)
+ {
+ var perm0 = np.arange(NumOfExamples);
+ np.random.shuffle(perm0);
+ Data = Data[perm0];
+ Labels = Labels[perm0];
+ }
+
+ // Go to the next epoch
+ if (start + batch_size > NumOfExamples)
+ {
+ // Finished epoch
+ EpochsCompleted += 1;
+
+ // Get the rest examples in this epoch
+ var rest_num_examples = NumOfExamples - start;
+ //var images_rest_part = _images[np.arange(start, _num_examples)];
+ //var labels_rest_part = _labels[np.arange(start, _num_examples)];
+ // Shuffle the data
+ if (shuffle)
+ {
+ var perm = np.arange(NumOfExamples);
+ np.random.shuffle(perm);
+ Data = Data[perm];
+ Labels = Labels[perm];
+ }
+
+ start = 0;
+ IndexInEpoch = batch_size - rest_num_examples;
+ var end = IndexInEpoch;
+ var images_new_part = Data[np.arange(start, end)];
+ var labels_new_part = Labels[np.arange(start, end)];
+
+ /*return (np.concatenate(new float[][] { images_rest_part.Data(), images_new_part.Data() }, axis: 0),
+ np.concatenate(new float[][] { labels_rest_part.Data(), labels_new_part.Data() }, axis: 0));*/
+ return (images_new_part, labels_new_part);
+ }
+ else
+ {
+ IndexInEpoch += batch_size;
+ var end = IndexInEpoch;
+ return (Data[np.arange(start, end)], Labels[np.arange(start, end)]);
+ }
+ }
}
}
diff --git a/src/TensorFlowHub/MnistModelLoader.cs b/src/TensorFlowHub/MnistModelLoader.cs
index 7c4ff109..121c0961 100644
--- a/src/TensorFlowHub/MnistModelLoader.cs
+++ b/src/TensorFlowHub/MnistModelLoader.cs
@@ -15,14 +15,26 @@ namespace Tensorflow.Hub
private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz";
private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz";
- public static async Task> LoadAsync(string trainDir, bool oneHot = false)
+ public static async Task> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null)
{
var loader = new MnistModelLoader();
- return await loader.LoadAsync(new ModelLoadSetting
+
+ var setting = new ModelLoadSetting
{
TrainDir = trainDir,
OneHot = oneHot
- });
+ };
+
+ if (trainSize.HasValue)
+ setting.TrainSize = trainSize.Value;
+
+ if (validationSize.HasValue)
+ setting.ValidationSize = validationSize.Value;
+
+ if (testSize.HasValue)
+ setting.TestSize = testSize.Value;
+
+ return await loader.LoadAsync(setting);
}
public async Task> LoadAsync(ModelLoadSetting setting)
@@ -86,7 +98,7 @@ namespace Tensorflow.Hub
var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape);
- var test = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
+ var test = new MnistDataSet(testImages, testLabels, dtype, reshape);
return new Datasets(train, validation, test);
}
diff --git a/src/TensorFlowHub/Utils.cs b/src/TensorFlowHub/Utils.cs
index 10aaf958..72ee9430 100644
--- a/src/TensorFlowHub/Utils.cs
+++ b/src/TensorFlowHub/Utils.cs
@@ -25,13 +25,25 @@ namespace Tensorflow.Hub
if (!Path.IsPathRooted(dirSaveTo))
dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo);
- if (!Directory.Exists(dirSaveTo))
- Directory.CreateDirectory(dirSaveTo);
-
- using (var wc = new WebClient())
+ var fileSaveTo = Path.Combine(dirSaveTo, fileName);
+
+ if (File.Exists(fileSaveTo))
{
- await wc.DownloadFileTaskAsync(url, Path.Combine(dirSaveTo, fileName));
+                //TODO: maybe verify the file's hash code and download info to confirm the file is complete ...
+ Console.WriteLine($"{fileSaveTo} already exists.");
}
+ else
+ {
+ if (!Directory.Exists(dirSaveTo))
+ Directory.CreateDirectory(dirSaveTo);
+
+ using (var wc = new WebClient())
+ {
+ await wc.DownloadFileTaskAsync(url, fileSaveTo);
+ }
+
+ }
+
}
public static async Task UnzipAsync(this IModelLoader modelLoader, string zipFile, string saveTo)
@@ -42,7 +54,7 @@ namespace Tensorflow.Hub
if (!Directory.Exists(saveTo))
Directory.CreateDirectory(saveTo);
-
+
if (!Path.IsPathRooted(zipFile))
zipFile = Path.Combine(AppContext.BaseDirectory, zipFile);
@@ -78,7 +90,7 @@ namespace Tensorflow.Hub
var cts = new CancellationTokenSource();
var showProgressTask = ShowProgressInConsole(cts);
-
+
try
{
await task;
@@ -86,7 +98,7 @@ namespace Tensorflow.Hub
finally
{
cts.Cancel();
- }
+ }
}
private static async Task ShowProgressInConsole(CancellationTokenSource cts)
diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs
index 5586840c..fb65d31b 100644
--- a/src/TensorFlowNET.Core/APIs/tf.math.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.math.cs
@@ -189,6 +189,9 @@ namespace Tensorflow
public static Tensor log1p(Tensor x, string name = null)
=> gen_math_ops.log1p(x, name);
+ public static Tensor logical_and(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.logical_and(x, y, name);
+
///
/// Clips tensor values to a specified min and max.
///
diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
index 0bc9d0e5..67efe726 100644
--- a/src/TensorFlowNET.Core/APIs/tf.nn.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -136,7 +136,7 @@ namespace Tensorflow
public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null)
{
- return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
+ return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
{
name = scope;
return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
@@ -169,7 +169,7 @@ namespace Tensorflow
///
public static Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null)
{
- with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
+ tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
{
name = scope;
labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient");
diff --git a/src/TensorFlowNET.Core/Buffers/Buffer.cs b/src/TensorFlowNET.Core/Buffers/Buffer.cs
index 0b73265d..dbe576b8 100644
--- a/src/TensorFlowNET.Core/Buffers/Buffer.cs
+++ b/src/TensorFlowNET.Core/Buffers/Buffer.cs
@@ -19,10 +19,8 @@ using System.Runtime.InteropServices;
namespace Tensorflow
{
- public class Buffer : IDisposable
+ public class Buffer : DisposableObject
{
- private IntPtr _handle;
-
private TF_Buffer buffer => Marshal.PtrToStructure(_handle);
public byte[] Data
@@ -30,8 +28,8 @@ namespace Tensorflow
get
{
var data = new byte[buffer.length];
- if (buffer.length > 0)
- Marshal.Copy(buffer.data, data, 0, (int)buffer.length);
+ if (data.Length > 0)
+ Marshal.Copy(buffer.data, data, 0, data.Length);
return data;
}
}
@@ -54,6 +52,8 @@ namespace Tensorflow
Marshal.Copy(data, 0, dst, data.Length);
_handle = c_api.TF_NewBufferFromString(dst, (ulong)data.Length);
+
+ Marshal.FreeHGlobal(dst);
}
public static implicit operator IntPtr(Buffer buffer)
@@ -66,9 +66,7 @@ namespace Tensorflow
return buffer.Data;
}
- public void Dispose()
- {
- c_api.TF_DeleteBuffer(_handle);
- }
+ protected override void DisposeUnManagedState(IntPtr handle)
+ => c_api.TF_DeleteBuffer(handle);
}
}
diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
index a6253520..8112708f 100644
--- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
+++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
@@ -68,7 +68,7 @@ namespace Tensorflow.Clustering
private Tensor _initialize()
{
- return with(ops.control_dependencies(new Operation[]
+ return tf_with(ops.control_dependencies(new Operation[]
{
check_ops.assert_positive(_num_remaining)
}), delegate
diff --git a/src/TensorFlowNET.Core/DisposableObject.cs b/src/TensorFlowNET.Core/DisposableObject.cs
new file mode 100644
index 00000000..7e416e6d
--- /dev/null
+++ b/src/TensorFlowNET.Core/DisposableObject.cs
@@ -0,0 +1,77 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow
+{
+ ///
+ /// Abstract class for disposable object allocated in unmanaged runtime.
+ ///
+ public abstract class DisposableObject : IDisposable
+ {
+ protected IntPtr _handle;
+
+ protected DisposableObject() { }
+
+ public DisposableObject(IntPtr handle)
+ {
+ _handle = handle;
+ }
+
+ protected virtual void DisposeManagedState()
+ {
+ }
+
+ protected abstract void DisposeUnManagedState(IntPtr handle);
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ // free unmanaged resources (unmanaged objects) and override a finalizer below.
+ if (_handle != IntPtr.Zero)
+ {
+ // dispose managed state (managed objects).
+ DisposeManagedState();
+
+ // set large fields to null.
+ DisposeUnManagedState(_handle);
+
+ _handle = IntPtr.Zero;
+ }
+ }
+ }
+
+ // override a finalizer only if Dispose(bool disposing) above has code to free unmanaged resources.
+ ~DisposableObject()
+ {
+ // Do not change this code. Put cleanup code in Dispose(bool disposing) above.
+ Dispose(false);
+ }
+
+ // This code added to correctly implement the disposable pattern.
+ public void Dispose()
+ {
+ // Do not change this code. Put cleanup code in Dispose(bool disposing) above.
+ Dispose(true);
+ // uncomment the following line if the finalizer is overridden above.
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Framework/c_api_util.cs b/src/TensorFlowNET.Core/Framework/c_api_util.cs
index 440cbf44..5d5cb9b3 100644
--- a/src/TensorFlowNET.Core/Framework/c_api_util.cs
+++ b/src/TensorFlowNET.Core/Framework/c_api_util.cs
@@ -128,7 +128,7 @@ namespace Tensorflow
IntPtr c_op;
while ((c_op = c_api.TF_GraphNextOperation(graph, ref pos)) != IntPtr.Zero)
{
- yield return c_op;
+ yield return new Operation(c_op, graph);
}
}
}
diff --git a/src/TensorFlowNET.Core/Framework/importer.py.cs b/src/TensorFlowNET.Core/Framework/importer.py.cs
index 577d41aa..0c405be9 100644
--- a/src/TensorFlowNET.Core/Framework/importer.py.cs
+++ b/src/TensorFlowNET.Core/Framework/importer.py.cs
@@ -42,7 +42,7 @@ namespace Tensorflow
string prefix = "";
var graph = ops.get_default_graph();
- with(ops.name_scope(name, "import", input_map.Values), scope =>
+ tf_with(ops.name_scope(name, "import", input_map.Values), scope =>
{
prefix = scope;
/*if (!string.IsNullOrEmpty(prefix))
diff --git a/src/TensorFlowNET.Core/Gradients/gradients_util.cs b/src/TensorFlowNET.Core/Gradients/gradients_util.cs
index 95f083da..43247fa4 100644
--- a/src/TensorFlowNET.Core/Gradients/gradients_util.cs
+++ b/src/TensorFlowNET.Core/Gradients/gradients_util.cs
@@ -55,7 +55,7 @@ namespace Tensorflow
**/
var grads = new Dictionary>>();
- with(ops.name_scope(name, "gradients",
+ tf_with(ops.name_scope(name, "gradients",
values: ys.Concat(xs).Concat(stop_gradients).Concat(grad_ys)), scope =>
{
string grad_scope = scope;
@@ -141,7 +141,7 @@ namespace Tensorflow
}
}
- with(ops.name_scope(op.name + "_grad"), scope1 =>
+ tf_with(ops.name_scope(op.name + "_grad"), scope1 =>
{
string name1 = scope1;
if (grad_fn != null)
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs
index a84185f3..a5ac79ba 100644
--- a/src/TensorFlowNET.Core/Gradients/math_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs
@@ -90,7 +90,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var y = op.outputs[0]; // y = e^x
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
y = math_ops.conj(y);
return new Tensor[] { math_ops.mul_no_nan(y, grad) };
});
@@ -107,7 +107,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
x = math_ops.conj(x);
return new Tensor[] { grad * math_ops.digamma(x) };
});
@@ -118,7 +118,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
x = math_ops.conj(x);
return new Tensor[] { grad * math_ops.reciprocal(x) };
});
@@ -431,7 +431,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var y = op.outputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
y = math_ops.conj(y);
return new Tensor[] { gen_math_ops.sigmoid_grad(y, grad) };
@@ -453,7 +453,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
x = math_ops.conj(x);
var y = constant_op.constant(2.0f, dtype: x.dtype);
@@ -467,7 +467,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var y = op.outputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
y = math_ops.conj(y);
return new Tensor[] { gen_math_ops.tanh_grad(y, grad) };
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs
index 67b93191..17828c73 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Export.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Export.cs
@@ -22,7 +22,7 @@ namespace Tensorflow
{
var buffer = new Buffer();
c_api.TF_GraphToGraphDef(_handle, buffer, s);
- s.Check();
+ s.Check(true);
// var def = GraphDef.Parser.ParseFrom(buffer);
// buffer.Dispose();
@@ -31,8 +31,11 @@ namespace Tensorflow
private GraphDef _as_graph_def(bool add_shapes = false)
{
- var buffer = ToGraphDef(Status);
- Status.Check();
+ var status = new Status();
+ var buffer = ToGraphDef(status);
+ status.Check(true);
+ status.Dispose();
+
var def = GraphDef.Parser.ParseFrom(buffer);
buffer.Dispose();
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs
index 7fcfdbd7..af7ebfd1 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs
@@ -43,16 +43,20 @@ namespace Tensorflow
var bytes = File.ReadAllBytes(file_path);
var graph_def = new Tensorflow.Buffer(bytes);
var opts = c_api.TF_NewImportGraphDefOptions();
- c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status);
- return Status;
+ var status = new Status();
+ c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status);
+ return status;
}
- public Status Import(byte[] bytes)
+ public Status Import(byte[] bytes, string prefix = "")
{
var graph_def = new Tensorflow.Buffer(bytes);
var opts = c_api.TF_NewImportGraphDefOptions();
- c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status);
- return Status;
+ c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix);
+ var status = new Status();
+ c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status);
+ c_api.TF_DeleteImportGraphDefOptions(opts);
+ return status;
}
public static Graph ImportFromPB(string file_path, string name = null)
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
index 06b65f03..09e09573 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
@@ -38,6 +38,31 @@ namespace Tensorflow
return c_api.TF_NewOperation(_handle, opType, opName);
}
+ public unsafe Operation[] ReturnOperations(IntPtr results)
+ {
+ TF_Operation return_oper_handle = new TF_Operation();
+ int num_return_opers = 0;
+ c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle);
+ Operation[] return_opers = new Operation[num_return_opers];
+ for (int i = 0; i < num_return_opers; i++)
+ {
+ var handle = return_oper_handle.node + Marshal.SizeOf() * i;
+ return_opers[i] = new Operation(*(IntPtr*)handle);
+ }
+
+ return return_opers;
+ }
+
+ public Operation OperationByName(string operName)
+ {
+ return c_api.TF_GraphOperationByName(_handle, operName);
+ }
+
+ public ITensorOrOperation[] get_operations()
+ {
+ return _nodes_by_name.Values.Select(x => x).ToArray();
+ }
+
///
/// Returns the `Operation` with the given `name`.
///
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs
index 82e83df1..7121e0be 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.cs
@@ -15,6 +15,7 @@
******************************************************************************/
using System;
+using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
@@ -72,7 +73,7 @@ namespace Tensorflow
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
*/
- public partial class Graph : IPython, IDisposable
+ public partial class Graph : IPython, IDisposable, IEnumerable
{
private IntPtr _handle;
private Dictionary _nodes_by_id;
@@ -87,8 +88,7 @@ namespace Tensorflow
private string _graph_key;
public string graph_key => _graph_key;
public string _last_loss_reduction;
- public bool _is_loss_scaled_by_optimizer { get; set; }
- public Status Status { get; }
+ public bool _is_loss_scaled_by_optimizer { get; set; }
///
/// True if the graph is considered "finalized". In that case no
@@ -106,7 +106,6 @@ namespace Tensorflow
public Graph()
{
_handle = c_api.TF_NewGraph();
- Status = new Status();
_nodes_by_id = new Dictionary();
_nodes_by_name = new Dictionary();
_names_in_use = new Dictionary();
@@ -116,11 +115,14 @@ namespace Tensorflow
public Graph(IntPtr handle)
{
_handle = handle;
- Status = new Status();
_nodes_by_id = new Dictionary();
_nodes_by_name = new Dictionary();
_names_in_use = new Dictionary();
_graph_key = $"grap-key-{ops.uid()}/";
+ }
+
+ public void __enter__()
+ {
}
public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true)
@@ -409,31 +411,6 @@ namespace Tensorflow
return return_outputs;
}
- public unsafe Operation[] ReturnOperations(IntPtr results)
- {
- TF_Operation return_oper_handle = new TF_Operation();
- int num_return_opers = 0;
- c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle);
- Operation[] return_opers = new Operation[num_return_opers];
- for (int i = 0; i < num_return_opers; i++)
- {
- var handle = return_oper_handle.node + Marshal.SizeOf() * i;
- return_opers[i] = new Operation(*(IntPtr*)handle);
- }
-
- return return_opers;
- }
-
- public Operation OperationByName(string operName)
- {
- return c_api.TF_GraphOperationByName(_handle, operName);
- }
-
- public ITensorOrOperation[] get_operations()
- {
- return _nodes_by_name.Values.Select(x => x).ToArray();
- }
-
public string[] get_all_collection_keys()
{
return _collections.Keys.Where(x => !x.StartsWith("__")).ToArray();
@@ -468,7 +445,12 @@ namespace Tensorflow
public void Dispose()
{
- // c_api.TF_DeleteGraph(_handle);
+ /*if (_handle != IntPtr.Zero)
+ c_api.TF_DeleteGraph(_handle);
+
+ _handle = IntPtr.Zero;
+
+ GC.SuppressFinalize(this);*/
}
///
@@ -481,17 +463,46 @@ namespace Tensorflow
public Tensor get_tensor_by_name(string name)
{
return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false);
- }
-
- public void __enter__()
- {
+ }
+
+ public TensorShape GetTensorShape(TF_Output output)
+ {
+ var status = new Status();
+ var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status);
+ status.Check();
+
+ if (ndim == -1)
+ return new TensorShape();
+
+ var dims = new long[ndim];
+ c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status);
+ status.Check();
+
+ return new TensorShape(dims.Select(x => (int)x).ToArray());
+ }
+
+ public override string ToString()
+ {
+ int len = 0;
+ return c_api.TF_GraphDebugString(_handle, out len);
}
public void __exit__()
{
- }
+ }
+
+ private IEnumerable GetEnumerable()
+ => c_api_util.tf_operations(this);
+ IEnumerator IEnumerable.GetEnumerator()
+ => GetEnumerable().GetEnumerator();
+
+ IEnumerator IEnumerable.GetEnumerator()
+ {
+ throw new NotImplementedException();
+ }
+
public static implicit operator IntPtr(Graph graph)
{
return graph._handle;
diff --git a/src/TensorFlowNET.Core/Graphs/c_api.graph.cs b/src/TensorFlowNET.Core/Graphs/c_api.graph.cs
index 05cd5940..889949ef 100644
--- a/src/TensorFlowNET.Core/Graphs/c_api.graph.cs
+++ b/src/TensorFlowNET.Core/Graphs/c_api.graph.cs
@@ -43,6 +43,9 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern void TF_DeleteImportGraphDefResults(IntPtr results);
+ [DllImport(TensorFlowLibName)]
+ public static extern string TF_GraphDebugString(IntPtr graph, out int len);
+
[DllImport(TensorFlowLibName)]
public static extern void TF_GraphGetOpDef(IntPtr graph, string op_name, IntPtr output_op_def, IntPtr status);
@@ -100,6 +103,7 @@ namespace Tensorflow
/// TF_Status*
[DllImport(TensorFlowLibName)]
public static extern void TF_GraphImportGraphDef(IntPtr graph, IntPtr graph_def, IntPtr options, IntPtr status);
+
///
/// Iterate through the operations of a graph.
///
diff --git a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
index d10b1874..52dc7bf4 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
@@ -207,7 +207,7 @@ namespace Tensorflow.Keras.Layers
public Tensor _assign_moving_average(RefVariable variable, Tensor value, Tensor momentum)
{
- return Python.with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
+ return Python.tf_with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
{
// var cm = ops.colocate_with(variable);
var decay = ops.convert_to_tensor(1.0f - momentum, name: "decay");
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
index f380ce78..d96c1f14 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Layer.cs
@@ -125,7 +125,7 @@ namespace Tensorflow.Keras.Layers
// Symbolic execution on symbolic tensors. We will attempt to build
// the corresponding TF subgraph inside `backend.get_graph()`
var graph = backend.get_graph().as_default();
- with(ops.name_scope(_name_scope()), delegate
+ tf_with(ops.name_scope(_name_scope()), delegate
{
// Build layer if applicable (if the `build` method has been
// overridden).
diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs
index 57c71e92..961952a6 100644
--- a/src/TensorFlowNET.Core/Layers/Layer.cs
+++ b/src/TensorFlowNET.Core/Layers/Layer.cs
@@ -72,7 +72,7 @@ namespace Tensorflow.Layers
}
Tensor outputs = null;
- with(scope_context_manager, scope2 =>
+ tf_with(scope_context_manager, scope2 =>
{
_current_scope = scope2;
// Actually call layer
@@ -136,12 +136,12 @@ namespace Tensorflow.Layers
_set_scope();
var reuse = built || (_reuse != null && _reuse.Value);
- return with(tf.variable_scope(_scope,
+ return tf_with(tf.variable_scope(_scope,
reuse: reuse,
auxiliary_name_scope: false), scope =>
{
_current_scope = scope;
- return with(ops.name_scope(_name_scope()), delegate
+ return tf_with(ops.name_scope(_name_scope()), delegate
{
var variable = base.add_weight(name,
shape,
@@ -183,7 +183,7 @@ namespace Tensorflow.Layers
}
else
{
- with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
+ tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
{
// convert variable_scope to VariableScope
_scope = captured_scope;
diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs
index c6b7d24d..136c9e3b 100644
--- a/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs
+++ b/src/TensorFlowNET.Core/Operations/ControlFlows/CondContext.cs
@@ -122,7 +122,7 @@ namespace Tensorflow.Operations
_external_values[result.name] = result;
}
- with(ops.control_dependencies(null), ctrl =>
+ tf_with(ops.control_dependencies(null), ctrl =>
{
var results = control_flow_ops._SwitchRefOrTensor(result, _pred);
result = results[_branch];
diff --git a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs
index 19a1266b..69affeea 100644
--- a/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs
+++ b/src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs
@@ -58,7 +58,7 @@ namespace Tensorflow
private Tensor _call_log_prob (Tensor value, string name)
{
- return with(ops.name_scope(name, "moments", new { value }), scope =>
+ return tf_with(ops.name_scope(name, "moments", new { value }), scope =>
{
try
{
diff --git a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs
index 2aa15063..f4f4b4bf 100644
--- a/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs
+++ b/src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs
@@ -50,9 +50,9 @@ namespace Tensorflow
parameters.Add("validate_args", validate_args);
parameters.Add("allow_nan_stats", allow_nan_stats);
- with(ops.name_scope(name, "", new { loc, scale }), scope =>
+ tf_with(ops.name_scope(name, "", new { loc, scale }), scope =>
{
- with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
+ tf_with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
{
this._loc = array_ops.identity(loc, name);
this._scale = array_ops.identity(scale, name);
diff --git a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs
index 9e530a12..f8ed0446 100644
--- a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs
+++ b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs
@@ -24,7 +24,7 @@ namespace Tensorflow
public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null,
string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS)
{
- return with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate
+ return tf_with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate
{
// Save the `reduction` argument for loss normalization when distributing
// to multiple replicas. Used only for estimator + v1 optimizer flow.
@@ -77,7 +77,7 @@ namespace Tensorflow
public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false)
{
- return with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope =>
+ return tf_with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope =>
{
string scope = name_scope;
weights = math_ops.cast(weights, dtype: dtypes.float32);
@@ -104,7 +104,7 @@ namespace Tensorflow
string loss_collection= ops.GraphKeys.LOSSES,
string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS)
{
- return with(ops.name_scope(scope,
+ return tf_with(ops.name_scope(scope,
"sparse_softmax_cross_entropy_loss",
(logits, labels, weights)),
name_scope =>
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
index 9d1e5726..b385f9c8 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/MaxPoolFunction.cs
@@ -30,7 +30,7 @@ namespace Tensorflow.Operations
string data_format = "NHWC",
string name = null)
{
- return with(ops.name_scope(name, "MaxPool", value), scope =>
+ return tf_with(ops.name_scope(name, "MaxPool", value), scope =>
{
name = scope;
value = ops.convert_to_tensor(value, name: "input");
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
index 3200e13f..5b820b3a 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
@@ -29,7 +29,7 @@ namespace Tensorflow.Operations
TF_DataType dtype = TF_DataType.DtInvalid,
int? parallel_iterations = null, bool swap_memory = false, bool time_major = false)
{
- with(tf.variable_scope("rnn"), scope =>
+ tf_with(tf.variable_scope("rnn"), scope =>
{
VariableScope varscope = scope;
var flat_input = nest.flatten(inputs_tensor);
@@ -139,7 +139,7 @@ namespace Tensorflow.Operations
var time = array_ops.constant(0, dtype: dtypes.int32, name: "time");
string base_name = null;
- with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);
+ tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);
Func _create_ta = (name, element_shape, dtype_) =>
{
diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
index 033731b0..ab34a320 100644
--- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
+++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
@@ -58,7 +58,7 @@ namespace Tensorflow
var input_types = new List();
object values = null;
- return with(ops.name_scope(name), scope =>
+ return tf_with(ops.name_scope(name), scope =>
{
var inferred_from = new Dictionary();
var base_types = new List();
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs
index 1b99dcc8..8de412c8 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs
@@ -23,7 +23,10 @@ namespace Tensorflow
///
public partial class Operation
{
- public static implicit operator Operation(IntPtr handle) => new Operation(handle);
+ // make sure the new op is in the same graph instance
+ public static implicit operator Operation(IntPtr handle)
+ => new Operation(handle);
+
public static implicit operator IntPtr(Operation op) => op._handle;
public static implicit operator Tensor(Operation op) => op.output;
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
index 83e7567f..6d6403c9 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
@@ -26,7 +26,18 @@ namespace Tensorflow
{
public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index));
public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index));
- public int InputListLength(string name) => c_api.TF_OperationInputListLength(_handle, name, status);
+
+ public int InputListLength(string name)
+ {
+ int num = 0;
+ using(var status = new Status())
+ {
+ num = c_api.TF_OperationInputListLength(_handle, name, status);
+ status.Check(true);
+ }
+ return num;
+ }
+
public int NumInputs => c_api.TF_OperationNumInputs(_handle);
private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray();
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
index cefb76cf..24348322 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
@@ -24,7 +24,18 @@ namespace Tensorflow
{
public int NumOutputs => c_api.TF_OperationNumOutputs(_handle);
public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(new TF_Output(_handle, index));
- public int OutputListLength(string name) => c_api.TF_OperationOutputListLength(_handle, name, status);
+
+ public int OutputListLength(string name)
+ {
+ int num = 0;
+ using (var status = new Status())
+ {
+ num = c_api.TF_OperationOutputListLength(_handle, name, status);
+ status.Check(true);
+ }
+
+ return num;
+ }
private Tensor[] _outputs;
public Tensor[] outputs => _outputs;
@@ -35,6 +46,8 @@ namespace Tensorflow
public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index));
+ public TF_Output this[int index] => _tf_output(index);
+
public unsafe TF_Input[] OutputConsumers(int index, int max_consumers)
{
int size = Marshal.SizeOf();
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index b673380b..d7590b97 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -54,11 +54,10 @@ namespace Tensorflow
public Operation op => this;
public TF_DataType dtype => TF_DataType.DtInvalid;
- private Status status = new Status();
public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle));
- public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle));
- public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle));
+ public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle));
+ public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle));
private NodeDef _node_def;
public NodeDef node_def
@@ -96,10 +95,14 @@ namespace Tensorflow
_operDesc = c_api.TF_NewOperation(g, opType, oper_name);
c_api.TF_SetAttrType(_operDesc, "dtype", TF_DataType.TF_INT32);
- _handle = c_api.TF_FinishOperation(_operDesc, status);
-
- // Dict mapping op name to file and line information for op colocation
- // context managers.
+ using (var status = new Status())
+ {
+ _handle = c_api.TF_FinishOperation(_operDesc, status);
+ status.Check(true);
+ }
+
+ // Dict mapping op name to file and line information for op colocation
+ // context managers.
_control_flow_context = graph._get_control_flow_context();
}
@@ -220,6 +223,7 @@ namespace Tensorflow
{
AttrValue x = null;
+ using (var status = new Status())
using (var buf = new Buffer())
{
c_api.TF_OperationGetAttrValueProto(_handle, name, buf, status);
@@ -274,12 +278,15 @@ namespace Tensorflow
var output = tensor._as_tf_output();
// Reset cached inputs.
- _inputs = null;
+ _inputs = null;
// after the c_api call next time _inputs is accessed
// the updated inputs are reloaded from the c_api
- c_api.UpdateEdge(_graph, output, input, status);
- //var updated_inputs = inputs;
- status.Check();
+ using (var status = new Status())
+ {
+ c_api.UpdateEdge(_graph, output, input, status);
+ //var updated_inputs = inputs;
+ status.Check();
+ }
}
private void _assert_same_graph(Tensor tensor)
diff --git a/src/TensorFlowNET.Core/Operations/RNNCell.cs b/src/TensorFlowNET.Core/Operations/RNNCell.cs
index 57f46e7b..1b260981 100644
--- a/src/TensorFlowNET.Core/Operations/RNNCell.cs
+++ b/src/TensorFlowNET.Core/Operations/RNNCell.cs
@@ -82,7 +82,7 @@ namespace Tensorflow
{
Tensor output = null;
var state_size = this.state_size;
- with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate
+ tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate
{
output = _zero_state_tensors(state_size, batch_size, dtype);
});
diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
index b4619c05..bbeee929 100644
--- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
+++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
@@ -66,7 +66,7 @@ namespace Tensorflow.Operations
_element_shape = new List { };
}
- with(ops.name_scope(name, "", new { handle, size, flow }), scope =>
+ tf_with(ops.name_scope(name, "", new { handle, size, flow }), scope =>
{
if(handle != null)
{
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.py.cs b/src/TensorFlowNET.Core/Operations/array_ops.py.cs
index c3f52cb8..2e909ab8 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.py.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.py.cs
@@ -43,7 +43,7 @@ namespace Tensorflow
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "zeros", shape), scope =>
+ return tf_with(ops.name_scope(name, "zeros", shape), scope =>
{
name = scope;
switch (dtype)
@@ -67,7 +67,7 @@ namespace Tensorflow
public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "zeros", shape), scope =>
+ return tf_with(ops.name_scope(name, "zeros", shape), scope =>
{
name = scope;
switch (dtype)
@@ -140,7 +140,7 @@ namespace Tensorflow
{
var must_pack = false;
var converted_elems = new List
- public class Status : IDisposable
+ public class Status : DisposableObject
{
- protected IntPtr _handle;
-
///
/// Error message
///
@@ -67,22 +65,7 @@ namespace Tensorflow
return status._handle;
}
- public void Dispose()
- {
- IntPtr h = IntPtr.Zero;
- lock (this)
- {
- h = _handle;
- _handle = IntPtr.Zero;
- }
- if (h != IntPtr.Zero)
- c_api.TF_DeleteStatus(h);
- GC.SuppressFinalize(this);
- }
-
- ~Status()
- {
- Dispose();
- }
+ protected override void DisposeUnManagedState(IntPtr handle)
+ => c_api.TF_DeleteStatus(handle);
}
}
diff --git a/src/TensorFlowNET.Core/Summaries/Summary.cs b/src/TensorFlowNET.Core/Summaries/Summary.cs
index 5a22385f..258edf88 100644
--- a/src/TensorFlowNET.Core/Summaries/Summary.cs
+++ b/src/TensorFlowNET.Core/Summaries/Summary.cs
@@ -55,7 +55,7 @@ namespace Tensorflow.Summaries
///
public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null)
{
- return with(ops.name_scope(name, "Merge", inputs), delegate
+ return tf_with(ops.name_scope(name, "Merge", inputs), delegate
{
var val = gen_logging_ops.merge_summary(inputs: inputs, name: name);
collect(val, collections?.ToList(), new List());
@@ -88,7 +88,7 @@ namespace Tensorflow.Summaries
public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null)
{
string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}";
- return with(ops.name_scope(scope_base_name, default_name: default_name, values), scope =>
+ return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope =>
{
var tag = scope._name_scope;
if (string.IsNullOrEmpty(family))
diff --git a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
index 1ec4f6f3..3037221c 100644
--- a/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
+++ b/src/TensorFlowNET.Core/TensorFlowNET.Core.csproj
@@ -5,7 +5,7 @@
TensorFlow.NET
Tensorflow
1.14.0
- 0.10.3
+ 0.10.8
Haiping Chen, Meinrad Recheis
SciSharp STACK
true
@@ -17,7 +17,7 @@
TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#
Google's TensorFlow full binding in .NET Standard.
Docs: https://tensorflownet.readthedocs.io
- 0.10.3.0
+ 0.10.8.0
Changes since v0.9.0:
1. Added full connected Convolution Neural Network example.
@@ -29,9 +29,15 @@ Docs: https://tensorflownet.readthedocs.io
7. Add BatchMatMulGrad.
8. Upgrade NumSharp.
9. Fix strided_slice_grad type convention error.
-10. Add AbsGrad.
+10. Add AbsGrad.
+11. Fix Session.LoadFromSavedModel(string).
+12. Add Tensor operator overloads.
+13. Fix default graph and operation issue when import model.
+14. Fix TF_String endcode and decode.
+15. Fix Tensor memory leak.
+16. Rename with to tf_with that is only used to build graph purpose.
7.2
- 0.10.3.0
+ 0.10.8.0
LICENSE
true
true
@@ -62,7 +68,7 @@ Docs: https://tensorflownet.readthedocs.io
-
+
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
index f5ac5f77..a104f066 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
@@ -1,648 +1,717 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using NumSharp;
-using System;
-using System.Linq;
-using System.Numerics;
-using System.Runtime.CompilerServices;
-using System.Runtime.InteropServices;
-using System.Text;
-using static Tensorflow.c_api;
-
-namespace Tensorflow
-{
- public partial class Tensor
- {
- ///
- /// true if unmanaged buffer has been freed.
- ///
- private bool _deallocator_called => _deallocatorArgs.deallocator_called;
-
- ///
- /// true if the Tensor was created from a managed array
- ///
- private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero;
-
- ///
- /// True only if the Tensor object was created in a way that the Tensor object itself allocated memory or pinned a managed object.
- /// False if the Tensor was created from a pointer
- ///
- public bool IsMemoryOwner { get; private set; }
-
- ///
- /// This holds values that are used by the unmanaged deallocator callback
- ///
- private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero };
-
- // note: they must be assigned to a static variable in order to work as unmanaged callbacks
- static Deallocator _hGlobalDeallocator = FreeHGlobalMemory;
- static Deallocator _gcHandleDeallocator = FreeGCHandle;
- private static Deallocator _nothingDeallocator = FreeNothing;
-
- ///
- /// Create a Tensor object from an existing TF handle
- ///
- ///
- public Tensor(IntPtr handle)
- {
- _handle = handle;
- IsMemoryOwner = false;
- }
-
- ///
- /// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller)
- /// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor
- /// but not the memory itself!
- ///
- /// Pointer to unmanaged, fixed or pinned memory which the caller owns
- /// Tensor shape
- /// TF data type
- /// Size of the tensor in memory
- public Tensor(IntPtr ptr, long[] shape, TF_DataType dType, int num_bytes)
- {
- _handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs);
- IsMemoryOwner = false;
- }
-
-#if _REGEN
- %types=["sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
- %foreach types%
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(#1[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(#1[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(#1 value, TF_DataType? dType = null)
- {
- var v = (#1*)Marshal.AllocHGlobal(sizeof(#1));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(#1), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
- %
-#else
-
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(sbyte[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(sbyte[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(sbyte value, TF_DataType? dType = null)
- {
- var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(byte[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(byte[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(byte value, TF_DataType? dType = null)
- {
- var v = (byte*)Marshal.AllocHGlobal(sizeof(byte));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(short[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(short[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(short value, TF_DataType? dType = null)
- {
- var v = (short*)Marshal.AllocHGlobal(sizeof(short));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(ushort[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(ushort[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(ushort value, TF_DataType? dType = null)
- {
- var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(int[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(int[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(int value, TF_DataType? dType = null)
- {
- var v = (int*)Marshal.AllocHGlobal(sizeof(int));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(uint[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(uint[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(uint value, TF_DataType? dType = null)
- {
- var v = (uint*)Marshal.AllocHGlobal(sizeof(uint));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(long[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(long[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(long value, TF_DataType? dType = null)
- {
- var v = (long*)Marshal.AllocHGlobal(sizeof(long));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(ulong[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(ulong[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(ulong value, TF_DataType? dType = null)
- {
- var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(float[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(float[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(float value, TF_DataType? dType = null)
- {
- var v = (float*)Marshal.AllocHGlobal(sizeof(float));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(double[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(double[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(double value, TF_DataType? dType = null)
- {
- var v = (double*)Marshal.AllocHGlobal(sizeof(double));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a 1d Tensor from the given linear array and shape
- ///
- public Tensor(Complex[] data, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a N-dimensional Tensor from the given array
- ///
- public Tensor(Complex[] data, long[] shape, TF_DataType? dType = null)
- {
- _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf());
- IsMemoryOwner=true;
- }
-
- ///
- /// Create a scalar Tensor from the given value
- ///
- public unsafe Tensor(Complex value, TF_DataType? dType = null)
- {
- var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex));
- *v = value;
- _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
- IsMemoryOwner=true;
- }
-#endif
-
- ///
- /// Create a string Tensor from the given string
- ///
- public unsafe Tensor(string str)
- {
- var buffer = Encoding.UTF8.GetBytes(str);
- var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
- var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
-
- IntPtr tensor = c_api.TF_TensorData(handle);
- Marshal.WriteInt64(tensor, 0);
- fixed (byte* src = &buffer[0])
- c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
- _handle = handle;
- status.Check(true);
- }
-
- public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null)
- {
- if (tensorDType == TF_DataType.TF_STRING && nd.dtype.Name == "Byte")
- {
- var buffer = nd.Data();
- var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
- var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
-
- IntPtr tensor = c_api.TF_TensorData(handle);
- Marshal.WriteInt64(tensor, 0);
- fixed (byte* src = &buffer[0])
- c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
-
- status.Check(true);
- _handle=handle;
- IsMemoryOwner = false;
- return;
- }
- _handle = Allocate(nd, tensorDType: tensorDType);
- IsMemoryOwner = true;
- }
-
- private unsafe IntPtr Allocate(NDArray nd, TF_DataType? tensorDType = null)
- {
- IntPtr dotHandle = IntPtr.Zero;
- int buffersize = 0;
-
- if (nd.dtype.Name != "String")
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using NumSharp;
+using System;
+using System.Linq;
+using System.Numerics;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Text;
+using static Tensorflow.c_api;
+
+namespace Tensorflow
+{
+ public partial class Tensor
+ {
+        /// <summary>
+        /// true if the unmanaged buffer has been freed by the deallocator callback.
+        /// </summary>
+        private bool _deallocator_called => _deallocatorArgs.deallocator_called;
+
+        /// <summary>
+        /// true if the Tensor was created from a managed array (a GCHandle was pinned for it).
+        /// </summary>
+        private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero;
+
+        /// <summary>
+        /// True only if the Tensor object was created in a way that the Tensor object itself allocated memory or pinned a managed object.
+        /// False if the Tensor was created from a pointer owned by the caller.
+        /// </summary>
+        public bool IsMemoryOwner { get; private set; }
+
+        /// <summary>
+        /// This holds values that are used by the unmanaged deallocator callback
+        /// (whether it already ran, and the GCHandle to unpin, if any).
+        /// </summary>
+        private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero };
+
+        // note: they must be assigned to a static variable in order to work as unmanaged callbacks
+        // (otherwise the delegate could be garbage-collected while native code still holds the pointer)
+        static Deallocator _hGlobalDeallocator = FreeHGlobalMemory;
+        static Deallocator _gcHandleDeallocator = FreeGCHandle;
+        private static Deallocator _nothingDeallocator = FreeNothing;
+
+        /// <summary>
+        /// Create a Tensor object from an existing TF handle.
+        /// The wrapper does not take ownership of any unmanaged memory.
+        /// </summary>
+        /// <param name="handle">Native TF_Tensor* handle.</param>
+        public Tensor(IntPtr handle)
+        {
+            _handle = handle;
+            IsMemoryOwner = false;
+        }
+
+        /// <summary>
+        /// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller)
+        /// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor
+        /// but not the memory itself!
+        /// </summary>
+        /// <param name="ptr">Pointer to unmanaged, fixed or pinned memory which the caller owns</param>
+        /// <param name="shape">Tensor shape</param>
+        /// <param name="dType">TF data type</param>
+        /// <param name="num_bytes">Size of the tensor in memory</param>
+        public Tensor(IntPtr ptr, long[] shape, TF_DataType dType, int num_bytes)
+        {
+            // _nothingDeallocator: TF must not free caller-owned memory when the tensor is deleted.
+            _handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner = false;
+        }
+
+#if _REGEN
+ %types=["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
+ %foreach types%
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(#1[] data, TF_DataType? dType = null)
+ {
+ _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>());
+ IsMemoryOwner=true;
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(#1[] data, long[] shape, TF_DataType? dType = null)
+ {
+ _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>());
+ IsMemoryOwner=true;
+ }
+
+ ///
+ /// Create a scalar Tensor from the given value
+ ///
+ public unsafe Tensor(#1 value, TF_DataType? dType = null)
+ {
+ var v = (#1*)Marshal.AllocHGlobal(sizeof(#1));
+ *v = value;
+ _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(#1), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+ IsMemoryOwner=true;
+ }
+ %
+#else
+
+
+
+        // NOTE: these constructors are the _REGEN expansions of the template above.
+        // The template uses Marshal.SizeOf<#1>() — each expansion must carry the
+        // generic type argument; Marshal.SizeOf() has no parameterless overload.
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(sbyte[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf<sbyte>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(sbyte[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf<sbyte>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(sbyte value, TF_DataType? dType = null)
+        {
+            var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(bool[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), new long[]{data.Length}, data, Marshal.SizeOf<bool>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(bool[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, Marshal.SizeOf<bool>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(bool value, TF_DataType? dType = null)
+        {
+            var v = (bool*)Marshal.AllocHGlobal(sizeof(bool));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(bool)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(bool), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(byte[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf<byte>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(byte[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf<byte>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(byte value, TF_DataType? dType = null)
+        {
+            var v = (byte*)Marshal.AllocHGlobal(sizeof(byte));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(short[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf<short>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(short[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf<short>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(short value, TF_DataType? dType = null)
+        {
+            var v = (short*)Marshal.AllocHGlobal(sizeof(short));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(ushort[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf<ushort>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(ushort[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf<ushort>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(ushort value, TF_DataType? dType = null)
+        {
+            var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(int[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf<int>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(int[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf<int>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(int value, TF_DataType? dType = null)
+        {
+            var v = (int*)Marshal.AllocHGlobal(sizeof(int));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(uint[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf<uint>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(uint[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf<uint>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(uint value, TF_DataType? dType = null)
+        {
+            var v = (uint*)Marshal.AllocHGlobal(sizeof(uint));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(long[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf<long>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(long[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf<long>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(long value, TF_DataType? dType = null)
+        {
+            var v = (long*)Marshal.AllocHGlobal(sizeof(long));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(ulong[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf<ulong>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(ulong[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf<ulong>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(ulong value, TF_DataType? dType = null)
+        {
+            var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(float[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf<float>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(float[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf<float>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(float value, TF_DataType? dType = null)
+        {
+            var v = (float*)Marshal.AllocHGlobal(sizeof(float));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(double[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf<double>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(double[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf<double>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(double value, TF_DataType? dType = null)
+        {
+            var v = (double*)Marshal.AllocHGlobal(sizeof(double));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a 1d Tensor from the given linear array and shape
+        /// </summary>
+        public Tensor(Complex[] data, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf<Complex>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a N-dimensional Tensor from the given array
+        /// </summary>
+        public Tensor(Complex[] data, long[] shape, TF_DataType? dType = null)
+        {
+            _handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf<Complex>());
+            IsMemoryOwner=true;
+        }
+
+        /// <summary>
+        /// Create a scalar Tensor from the given value
+        /// </summary>
+        public unsafe Tensor(Complex value, TF_DataType? dType = null)
+        {
+            var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex));
+            *v = value;
+            _handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
+            IsMemoryOwner=true;
+        }
+#endif
+
+        /// <summary>
+        /// Create a string (scalar) Tensor from the given string.
+        /// </summary>
+        /// <param name="str">The string; encoded to UTF-8 before TF_StringEncode.</param>
+        public unsafe Tensor(string str)
+        {
+            var status = new Status();
+            var buffer = Encoding.UTF8.GetBytes(str);
+            // A scalar TF_STRING tensor is an 8-byte offset (always 0 for a scalar)
+            // followed by the TF_StringEncode'd bytes.
+            var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
+            var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+
+            IntPtr tensor = c_api.TF_TensorData(handle);
+            Marshal.WriteInt64(tensor, 0);
+            // NOTE(review): an empty string makes buffer.Length == 0 and &buffer[0]
+            // throws IndexOutOfRangeException — confirm callers never pass "".
+            fixed (byte* src = &buffer[0])
+                c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+            _handle = handle;
+            status.Check(true);
+            // NOTE(review): IsMemoryOwner is left at its default here; the buffer is
+            // owned by TF_AllocateTensor, not by a managed deallocator — verify Dispose
+            // releases the handle correctly.
+        }
+
+        /// <summary>
+        /// Create a Tensor from an NDArray, copying its data into unmanaged memory.
+        /// </summary>
+        /// <param name="nd">Source array.</param>
+        /// <param name="tensorDType">Optional TF dtype override; TF_STRING with a byte
+        /// NDArray encodes the raw bytes as a scalar string tensor.</param>
+        public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null)
+        {
+            if (tensorDType == TF_DataType.TF_STRING && nd.dtype.Name == "Byte")
+            {
+                // fix: restore the generic type argument — Data() without <byte> does not
+                // return the byte[] required below (stripped during an earlier edit).
+                var buffer = nd.Data<byte>();
+                var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
+                var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+
+                // Scalar TF_STRING layout: one 8-byte offset (0) + encoded bytes.
+                IntPtr tensor = c_api.TF_TensorData(handle);
+                Marshal.WriteInt64(tensor, 0);
+
+                var status = new Status();
+                fixed (byte* src = &buffer[0])
+                    c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+
+                status.Check(true);
+                _handle=handle;
+                // TF owns the allocated buffer; nothing managed was pinned.
+                IsMemoryOwner = false;
+                return;
+            }
+            _handle = Allocate(nd, tensorDType: tensorDType);
+            IsMemoryOwner = true;
+        }
+
+        /// <summary>
+        /// Copies an NDArray into freshly allocated unmanaged memory and wraps it in a
+        /// new TF tensor handle. The memory is released by _hGlobalDeallocator.
+        /// </summary>
+        /// <param name="nd">Source array; its dtype selects the typed copy below.</param>
+        /// <param name="tensorDType">Unused for numeric types; strings are re-dispatched
+        /// through the byte[] string constructor.</param>
+        /// <returns>A native TF_Tensor* handle.</returns>
+        private unsafe IntPtr Allocate(NDArray nd, TF_DataType? tensorDType = null)
+        {
+            IntPtr dotHandle = IntPtr.Zero;
+            int buffersize = 0;
+
+            // Strings don't use a flat buffer; skip the allocation for them.
+            if (nd.dtype.Name != "String")
+            {
+                buffersize = (nd.size * nd.dtypesize);
+                dotHandle = Marshal.AllocHGlobal(buffersize);
+            }
+
+            var dataType = ToTFDataType(nd.dtype);
+            // shape
+            var dims = nd.shape.Select(x => (long)x).ToArray();
+            var nd1 = nd.ravel();
+            // fix: restore the generic type arguments on Data<T>() — the typed
+            // Marshal.Copy overloads require concretely-typed arrays.
+            switch (nd.dtype.Name)
+            {
+                case "Boolean":
+                    // Marshal.Copy has no bool overload; widen to bytes first.
+                    var boolVals = Array.ConvertAll(nd1.Data<bool>(), x => Convert.ToByte(x));
+                    Marshal.Copy(boolVals, 0, dotHandle, nd.size);
+                    break;
+                case "Int16":
+                    Marshal.Copy(nd1.Data<short>(), 0, dotHandle, nd.size);
+                    break;
+                case "Int32":
+                    Marshal.Copy(nd1.Data<int>(), 0, dotHandle, nd.size);
+                    break;
+                case "Int64":
+                    Marshal.Copy(nd1.Data<long>(), 0, dotHandle, nd.size);
+                    break;
+                case "Single":
+                    Marshal.Copy(nd1.Data<float>(), 0, dotHandle, nd.size);
+                    break;
+                case "Double":
+                    Marshal.Copy(nd1.Data<double>(), 0, dotHandle, nd.size);
+                    break;
+                case "Byte":
+                    Marshal.Copy(nd1.Data<byte>(), 0, dotHandle, nd.size);
+                    break;
+                case "String":
+                    // Relies on Tensor's implicit IntPtr conversion operator.
+                    return new Tensor(UTF8Encoding.UTF8.GetBytes(nd.Data<string>(0)), TF_DataType.TF_STRING);
+                default:
+                    throw new NotImplementedException($"Marshal.Copy failed for {nd.dtype.Name}.");
+            }
+            var tfHandle = c_api.TF_NewTensor(dataType,
+                dims,
+                dims.Length,
+                dotHandle,
+                (UIntPtr)buffersize,
+                _hGlobalDeallocator,
+                ref _deallocatorArgs);
+
+            return tfHandle;
+        }
+
+ public unsafe Tensor(byte[][] buffer, long[] shape)
+ {
+ int size = 0;
+ foreach (var b in buffer)
{
- buffersize = (nd.size * nd.dtypesize);
- dotHandle = Marshal.AllocHGlobal(buffersize);
+ size += (int)TF_StringEncodedSize((UIntPtr)b.Length);
}
-
- var dataType = ToTFDataType(nd.dtype);
- // shape
- var dims = nd.shape.Select(x => (long)x).ToArray();
- var nd1 = nd.ravel();
- switch (nd.dtype.Name)
+ int totalSize = size + buffer.Length * 8;
+ ulong offset = 0;
+ IntPtr handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, (UIntPtr)totalSize);
+
+ // Clear offset table
+ IntPtr pOffset = TF_TensorData(handle);
+ IntPtr dst = pOffset + buffer.Length * 8;
+ IntPtr dstLimit = pOffset + totalSize;
+ for (int i = 0; i < buffer.Length; i++)
{
- case "Boolean":
- var boolVals = Array.ConvertAll(nd1.Data(), x => Convert.ToByte(x));
- Marshal.Copy(boolVals, 0, dotHandle, nd.size);
- break;
- case "Int16":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "Int32":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "Int64":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "Single":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "Double":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "Byte":
- Marshal.Copy(nd1.Data(), 0, dotHandle, nd.size);
- break;
- case "String":
- return new Tensor(UTF8Encoding.UTF8.GetBytes(nd.Data(0)), TF_DataType.TF_STRING);
- default:
- throw new NotImplementedException($"Marshal.Copy failed for {nd.dtype.Name}.");
+ Marshal.WriteInt64(pOffset, (long)offset);
+ using (var status = new Status())
+ {
+ fixed (byte* src = &buffer[i][0])
+ {
+ var written = TF_StringEncode(src, (UIntPtr)buffer[i].Length, (sbyte*)dst, (UIntPtr)(dstLimit.ToInt64() - dst.ToInt64()), status);
+ status.Check(true);
+ pOffset += 8;
+ dst += (int)written;
+ offset += written;
+ }
+ }
}
- var tfHandle = c_api.TF_NewTensor(dataType,
- dims,
- dims.Length,
- dotHandle,
- (UIntPtr)buffersize,
- _hGlobalDeallocator,
- ref _deallocatorArgs);
-
- return tfHandle;
- }
-
- public Tensor(Operation op, int value_index, TF_DataType dtype)
- {
- _op = op;
- _value_index = value_index;
- _dtype = dtype;
- _id = ops.uid();
- }
-
-
- ///
- /// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on.
- ///
- /// Represents the tensor shape.
- /// The linear array of data, the data must fit in the tensor with the specified dimensions.
- /// The number of bytes in memory of a single array element
- ///
- /// Use the FromBuffer method to create a tensor that has the specified dimensions
- /// and is initialized with data from the data array. The data is copied starting
- /// at the start offset, for count bytes and is laid out into the tensor following the
- /// specified dimensions.
- ///
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size)
- {
- if (dt == TF_DataType.TF_STRING && data is byte[])
- {
- var buffer = (byte[])data;
- var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
- var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
- IntPtr tensor = c_api.TF_TensorData(handle);
- Marshal.WriteInt64(tensor, 0);
- fixed (byte* src = &buffer[0])
- c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
-
- status.Check(true);
- return handle;
- }
- return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size);
- }
-
- ///
- /// Creates a new tensor from a subsection of the given array without copying memory. The array is pinned down and the pointer passed on.
- ///
- /// Represents the tensor shape.
- /// The linear array of data, the data must fit in the tensor with the specified dimensions.
- /// The offset into the provided data array where the data resides.
- /// The number of elements to copy from data.
- /// The number of bytes in memory of a single array element
- ///
- /// Use the FromBuffer method to create a tensor that has the specified dimensions
- /// and is initialized with data from the data array. The data is copied starting
- /// at the start offset, for count bytes and is laid out into the tensor following the
- /// specified dimensions.
- ///
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
- {
- if (start < 0 || start > data.Length - count)
- throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast().ToArray())}");
-
- // get a handle to the pinned array which we will pass on to the tensor computation engine to use
- var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
- _deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) };
- if (shape == null || shape.Length == 0)
- return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
- else
- return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
- }
-
- [MonoPInvokeCallback(typeof(Deallocator))]
- internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
- {
- if (args.deallocator_called)
- return;
- Marshal.FreeHGlobal(dataPtr);
- args.deallocator_called = true;
- }
-
- [MonoPInvokeCallback(typeof(Deallocator))]
- internal static void FreeGCHandle(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
- {
- if (args.deallocator_called || args.gc_handle == IntPtr.Zero)
- return;
- // note: since the ptr given to tensorflow is just the addr of the pinned object we can not directly free it! we need to free the gcHandle instead
- GCHandle.FromIntPtr(args.gc_handle).Free();
- args.deallocator_called = true;
- }
-
- [MonoPInvokeCallback(typeof(Deallocator))]
- internal static void FreeNothing(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
- {
- args.deallocator_called = true;
- }
-
- }
-
- ///
- /// This attribute can be applied to callback functions that will be invoked
- /// from unmanaged code to managed code.
- ///
- ///
- ///
- /// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]
- /// internal static void MyFreeFunc (IntPtr data, IntPtr length){..}
- ///
- ///
- public sealed class MonoPInvokeCallbackAttribute : Attribute
- {
- ///
- /// Use this constructor to annotate the type of the callback function that
- /// will be invoked from unmanaged code.
- ///
- /// T.
- public MonoPInvokeCallbackAttribute(Type t) { }
- }
-
-}
+ _handle = handle;
+ }
+
+        /// <summary>
+        /// Create a symbolic Tensor that represents output <paramref name="value_index"/>
+        /// of <paramref name="op"/>; no native tensor handle is allocated.
+        /// </summary>
+        /// <param name="op">The operation producing this tensor.</param>
+        /// <param name="value_index">Index of this tensor among the op's outputs.</param>
+        /// <param name="dtype">Element type of the output.</param>
+        public Tensor(Operation op, int value_index, TF_DataType dtype)
+        {
+            _op = op;
+            _value_index = value_index;
+            _dtype = dtype;
+            _id = ops.uid();
+        }
+
+
+        /// <summary>
+        /// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on.
+        /// </summary>
+        /// <param name="dt">TF data type of the resulting tensor.</param>
+        /// <param name="shape">Represents the tensor shape.</param>
+        /// <param name="data">The linear array of data, the data must fit in the tensor with the specified dimensions.</param>
+        /// <param name="element_size">The number of bytes in memory of a single array element</param>
+        /// <remarks>
+        /// Use the FromBuffer method to create a tensor that has the specified dimensions
+        /// and is initialized with data from the data array. The data is copied starting
+        /// at the start offset, for count bytes and is laid out into the tensor following the
+        /// specified dimensions.
+        /// </remarks>
+        [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size)
+        {
+            // TF_STRING + byte[] is special-cased: the bytes ARE copied (encoded) into
+            // a TF-allocated buffer, and the given shape is ignored (scalar result).
+            if (dt == TF_DataType.TF_STRING && data is byte[])
+            {
+                var buffer = (byte[])data;
+                var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
+                var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+
+                // Scalar TF_STRING layout: one 8-byte offset (0) + encoded bytes.
+                IntPtr tensor = c_api.TF_TensorData(handle);
+                Marshal.WriteInt64(tensor, 0);
+
+                var status = new Status();
+                fixed (byte* src = &buffer[0])
+                    c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+
+                status.Check(true);
+                return handle;
+            }
+            return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size);
+        }
+
+ ///
+ /// Creates a new tensor from a subsection of the given array without copying memory. The array is pinned down and the pointer passed on.
+ ///
+ /// Represents the tensor shape.
+ /// The linear array of data, the data must fit in the tensor with the specified dimensions.
+ /// The offset into the provided data array where the data resides.
+ /// The number of elements to copy from data.
+ /// The number of bytes in memory of a single array element
+ ///
+ /// Use the FromBuffer method to create a tensor that has the specified dimensions
+ /// and is initialized with data from the data array. The data is copied starting
+ /// at the start offset, for count bytes and is laid out into the tensor following the
+ /// specified dimensions.
+ ///
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
+ {
+ if (start < 0 || start > data.Length - count)
+ throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast().ToArray())}");
+
+ // get a handle to the pinned array which we will pass on to the tensor computation engine to use
+ var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
+ _deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) };
+ if (shape == null || shape.Length == 0)
+ return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
+ else
+ return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
+ }
+
+ [MonoPInvokeCallback(typeof(Deallocator))]
+ internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
+ {
+ if (args.deallocator_called)
+ return;
+ Marshal.FreeHGlobal(dataPtr);
+ args.deallocator_called = true;
+ }
+
+ [MonoPInvokeCallback(typeof(Deallocator))]
+ internal static void FreeGCHandle(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
+ {
+ if (args.deallocator_called || args.gc_handle == IntPtr.Zero)
+ return;
+ // note: since the ptr given to tensorflow is just the addr of the pinned object we can not directly free it! we need to free the gcHandle instead
+ GCHandle.FromIntPtr(args.gc_handle).Free();
+ args.deallocator_called = true;
+ }
+
+ [MonoPInvokeCallback(typeof(Deallocator))]
+ internal static void FreeNothing(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
+ {
+ args.deallocator_called = true;
+ }
+
+ }
+
+ ///
+ /// This attribute can be applied to callback functions that will be invoked
+ /// from unmanaged code to managed code.
+ ///
+ ///
+ ///
+ /// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]
+ /// internal static void MyFreeFunc (IntPtr data, IntPtr length){..}
+ ///
+ ///
+ public sealed class MonoPInvokeCallbackAttribute : Attribute
+ {
+ ///
+ /// Use this constructor to annotate the type of the callback function that
+ /// will be invoked from unmanaged code.
+ ///
+ /// T.
+ public MonoPInvokeCallbackAttribute(Type t) { }
+ }
+
+}
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
index 0b9c7f3e..4bd32d74 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
@@ -15,6 +15,7 @@
******************************************************************************/
using System;
+using System.Linq;
using static Tensorflow.Python;
namespace Tensorflow
@@ -63,22 +64,56 @@ namespace Tensorflow
public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor);
public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor);
- public static Tensor operator /(Tensor x, Tensor y) => BinaryOpWrapper("truediv", x, y);
- public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y);
+ private static readonly TF_DataType[] _intTfDataTypes = {
+ TF_DataType.TF_INT8, TF_DataType.TF_INT16, TF_DataType.TF_INT32, TF_DataType.TF_INT64,
+ TF_DataType.TF_QINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QINT32,
+ TF_DataType.TF_UINT8, TF_DataType.TF_UINT16, TF_DataType.TF_UINT32, TF_DataType.TF_UINT64
+ };
+ public static Tensor operator /(double x, Tensor y) => BinaryOpWrapper("truediv", x, y);
public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y);
+ public static Tensor operator /(int x, Tensor y) => BinaryOpWrapper("floordiv", x, y);
+ public static Tensor operator /(Tensor x, Tensor y) =>
+ _intTfDataTypes.Contains(x._dtype)
+ ? BinaryOpWrapper("floordiv", x, y)
+ : BinaryOpWrapper("truediv", x, y);
+ public static Tensor operator /(Tensor x, int y) => BinaryOpWrapper("floordiv", x, y);
+ public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y);
public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y);
public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y);
+ public static Tensor operator >(double x, Tensor y) => gen_math_ops.greater(x, y);
+ public static Tensor operator >(float x, Tensor y) => gen_math_ops.greater(x, y);
+ public static Tensor operator >(int x, Tensor y) => gen_math_ops.greater(x, y);
+ public static Tensor operator >(Tensor x, Tensor y) => gen_math_ops.greater(x, y);
public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y);
- public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y);
public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y);
public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y);
+
+ public static Tensor operator <(double x, Tensor y) => gen_math_ops.less(x, y);
+ public static Tensor operator <(float x, Tensor y) => gen_math_ops.less(x, y);
+ public static Tensor operator <(int x, Tensor y) => gen_math_ops.less(x, y);
+ public static Tensor operator <(Tensor x, Tensor y) => gen_math_ops.less(x, y);
public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y);
- public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y);
public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y);
public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y);
+ public static Tensor operator >=(double x, Tensor y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(float x, Tensor y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(int x, Tensor y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(Tensor x, int y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(Tensor x, float y) => gen_math_ops.greater_equal(x, y);
+ public static Tensor operator >=(Tensor x, double y) => gen_math_ops.greater_equal(x, y);
+
+ public static Tensor operator <=(int x, Tensor y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(float x, Tensor y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(double x, Tensor y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(Tensor x, int y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(Tensor x, float y) => gen_math_ops.less_equal(x, y);
+ public static Tensor operator <=(Tensor x, double y) => gen_math_ops.less_equal(x, y);
+
private static Tensor BinaryOpWrapper(string name, Tx x, Ty y)
{
TF_DataType dtype = TF_DataType.DtInvalid;
@@ -88,7 +123,7 @@ namespace Tensorflow
dtype = tr.dtype.as_base_dtype();
var namescope = ops.name_scope(null, name, new { x, y });
- return with(namescope, scope =>
+ return tf_with(namescope, scope =>
{
Tensor result = null;
var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x");
@@ -99,6 +134,9 @@ namespace Tensorflow
case "add":
result = gen_math_ops.add(x1, y1, name: scope);
break;
+ case "floordiv":
+ result = gen_math_ops.floor_div(x1, y1, name: scope);
+ break;
case "truediv":
result = gen_math_ops.real_div(x1, y1, name: scope);
break;
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs
index aebca212..50141be6 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs
@@ -19,6 +19,7 @@ using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
+using System.Text;
using Tensorflow.Framework;
using static Tensorflow.Python;
@@ -28,10 +29,8 @@ namespace Tensorflow
/// A tensor is a generalization of vectors and matrices to potentially higher dimensions.
/// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes.
///
- public partial class Tensor : IDisposable, ITensorOrOperation, _TensorLike
+ public partial class Tensor : DisposableObject, ITensorOrOperation, _TensorLike
{
- private IntPtr _handle;
-
private int _id;
private Operation _op;
@@ -48,8 +47,6 @@ namespace Tensorflow
private int _value_index;
public int value_index => _value_index;
- private Status status = new Status();
-
private TF_DataType _dtype = TF_DataType.DtInvalid;
public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle);
@@ -76,6 +73,7 @@ namespace Tensorflow
if (_handle == IntPtr.Zero)
{
+ var status = new Status();
c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status);
status.Check();
}
@@ -90,6 +88,8 @@ namespace Tensorflow
set
{
+ var status = new Status();
+
if (value == null)
c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status);
else
@@ -131,8 +131,11 @@ namespace Tensorflow
{
if (_handle == IntPtr.Zero)
{
+ var status = new Status();
var output = _as_tf_output();
- return c_api.TF_GraphGetTensorNumDims(op.graph, output, status);
+ int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, status);
+ status.Check();
+ return ndim;
}
else
{
@@ -184,6 +187,41 @@ namespace Tensorflow
return data;
}
+ public unsafe string[] StringData()
+ {
+ //
+ // TF_STRING tensors are encoded with a table of 8-byte offsets followed by TF_StringEncode-encoded bytes.
+ // [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes]
+ //
+ long size = 1;
+ foreach (var s in TensorShape.Dimensions)
+ size *= s;
+
+ var buffer = new byte[size][];
+ var src = c_api.TF_TensorData(_handle);
+ var srcLen = (IntPtr)(src.ToInt64() + (long)bytesize);
+ src += (int)(size * 8);
+ for (int i = 0; i < buffer.Length; i++)
+ {
+ using (var status = new Status())
+ {
+ IntPtr dst = IntPtr.Zero;
+ UIntPtr dstLen = UIntPtr.Zero;
+ var read = c_api.TF_StringDecode((byte*)src, (UIntPtr)(srcLen.ToInt64() - src.ToInt64()), (byte**)&dst, &dstLen, status);
+ status.Check(true);
+ buffer[i] = new byte[(int)dstLen];
+ Marshal.Copy(dst, buffer[i], 0, buffer[i].Length);
+ src += (int)read;
+ }
+ }
+
+ var _str = new string[buffer.Length];
+ for (int i = 0; i < _str.Length; i++)
+ _str[i] = Encoding.UTF8.GetString(buffer[i]);
+
+ return _str;
+ }
+
public Tensor MaybeMove()
{
var tensor = c_api.TF_TensorMaybeMove(_handle);
@@ -262,7 +300,7 @@ namespace Tensorflow
index += 1;
}
- return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
+ return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
{
string name = scope;
if (begin != null)
@@ -311,7 +349,7 @@ namespace Tensorflow
index += 1;
}
- return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
+ return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
{
string name = scope;
if (begin != null)
@@ -354,26 +392,12 @@ namespace Tensorflow
return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}";
}
- public void Dispose()
+ protected override void DisposeUnManagedState(IntPtr handle)
{
- IntPtr h=IntPtr.Zero;
- lock (this)
+ if(handle != IntPtr.Zero)
{
- h = _handle;
- _handle=IntPtr.Zero;
+ c_api.TF_DeleteTensor(handle);
}
- if (h != IntPtr.Zero)
- c_api.TF_DeleteTensor(_handle);
- status.Dispose();
- GC.SuppressFinalize(this);
- }
-
- ///
- /// Dispose the tensor when it gets garbage collected
- ///
- ~Tensor()
- {
- Dispose();
}
public bool IsDisposed
diff --git a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
index cf208ed2..6b20b34f 100644
--- a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
+++ b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
@@ -32,6 +32,12 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len);
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, ulong len);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len);
+
///
/// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value.
///
@@ -150,5 +156,8 @@ namespace Tensorflow
///
[DllImport(TensorFlowLibName)]
public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern unsafe UIntPtr TF_StringDecode(byte* src, UIntPtr src_len, byte** dst, UIntPtr* dst_len, IntPtr status);
}
}
diff --git a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs
index c273f4d4..673e1307 100644
--- a/src/TensorFlowNET.Core/Train/AdamOptimizer.cs
+++ b/src/TensorFlowNET.Core/Train/AdamOptimizer.cs
@@ -81,7 +81,7 @@ namespace Tensorflow.Train
var m = get_slot(var, "m");
var m_scaled_g_values = grad * (1 - beta1_t);
var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking);
- with(ops.control_dependencies(new[] { m_t }), delegate
+ tf_with(ops.control_dependencies(new[] { m_t }), delegate
{
m_t = scatter_add(m, indices, m_scaled_g_values);
});
@@ -89,7 +89,7 @@ namespace Tensorflow.Train
var v = get_slot(var, "v");
var v_scaled_g_values = (grad * grad) * (1 - beta2_t);
var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking);
- with(ops.control_dependencies(new[] { v_t }), delegate
+ tf_with(ops.control_dependencies(new[] { v_t }), delegate
{
v_t = scatter_add(v, indices, v_scaled_g_values);
});
@@ -117,7 +117,7 @@ namespace Tensorflow.Train
var operations = new List();
operations.AddRange(update_ops);
- with(ops.control_dependencies(update_ops), delegate
+ tf_with(ops.control_dependencies(update_ops), delegate
{
var (beta1_power, beta2_power) = _get_beta_accumulators();
ops.colocate_with(beta1_power);
diff --git a/src/TensorFlowNET.Core/Train/Optimizer.cs b/src/TensorFlowNET.Core/Train/Optimizer.cs
index 3e1d86c5..e945b120 100644
--- a/src/TensorFlowNET.Core/Train/Optimizer.cs
+++ b/src/TensorFlowNET.Core/Train/Optimizer.cs
@@ -151,7 +151,7 @@ namespace Tensorflow
_create_slots(var_list);
var update_ops = new List();
- return with(ops.name_scope(name, Name), scope =>
+ return tf_with(ops.name_scope(name, Name), scope =>
{
name = scope;
_prepare();
@@ -162,7 +162,7 @@ namespace Tensorflow
continue;
var scope_name = var.op.name;
- with(ops.name_scope("update_" + scope_name), scope2 =>
+ tf_with(ops.name_scope("update_" + scope_name), scope2 =>
{
var op = processor.update_op(this, grad);
update_ops.Add(op);
@@ -176,7 +176,7 @@ namespace Tensorflow
}
else
{
- with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep =>
+ tf_with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep =>
{
ops.colocate_with(global_step);
// TODO: port this if branch once ResourceVariable has been ported!
diff --git a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
index 9deca740..95775a72 100644
--- a/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
+++ b/src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
@@ -102,7 +102,7 @@ namespace Tensorflow
Tensor save_tensor = null;
Operation restore_op = null;
- return with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
+ return tf_with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
{
name = scope;
diff --git a/src/TensorFlowNET.Core/Train/SlotCreator.cs b/src/TensorFlowNET.Core/Train/SlotCreator.cs
index aaa2c171..57582355 100644
--- a/src/TensorFlowNET.Core/Train/SlotCreator.cs
+++ b/src/TensorFlowNET.Core/Train/SlotCreator.cs
@@ -57,7 +57,7 @@ namespace Tensorflow.Train
{
var validate_shape = shape.is_fully_defined();
var prefix = primary.op.name;
- return with(new variable_scope(string.Empty, prefix + "/" + name), delegate
+ return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate
{
return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype);
});
diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs
index 8c2c1204..5adf5d9a 100644
--- a/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs
+++ b/src/TensorFlowNET.Core/Variables/RefVariable.Operators.cs
@@ -32,7 +32,7 @@ namespace Tensorflow
private static Tensor op_helper(string default_name, RefVariable x, T y)
{
var tensor1 = x.value();
- return with(ops.name_scope(null, default_name, new { tensor1, y }), scope => {
+ return tf_with(ops.name_scope(null, default_name, new { tensor1, y }), scope => {
var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y");
return gen_math_ops.add(tensor1, tensor2, scope);
});
diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.cs b/src/TensorFlowNET.Core/Variables/RefVariable.cs
index 78a241c2..463ba2d0 100644
--- a/src/TensorFlowNET.Core/Variables/RefVariable.cs
+++ b/src/TensorFlowNET.Core/Variables/RefVariable.cs
@@ -134,7 +134,7 @@ namespace Tensorflow
ops.init_scope();
var values = init_from_fn ? new object[0] : new object[] { initial_value };
- with(ops.name_scope(name, "Variable", values), scope =>
+ tf_with(ops.name_scope(name, "Variable", values), scope =>
{
name = scope;
if (init_from_fn)
@@ -148,7 +148,7 @@ namespace Tensorflow
List = new AttrValue.Types.ListValue()
};
attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}"));
- with(ops.name_scope("Initializer"), scope2 =>
+ tf_with(ops.name_scope("Initializer"), scope2 =>
{
_initial_value = (initial_value as Func)();
_initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype);
diff --git a/src/TensorFlowNET.Core/Variables/VariableScope.cs b/src/TensorFlowNET.Core/Variables/VariableScope.cs
index d509d6b2..778e59b1 100644
--- a/src/TensorFlowNET.Core/Variables/VariableScope.cs
+++ b/src/TensorFlowNET.Core/Variables/VariableScope.cs
@@ -56,7 +56,7 @@ namespace Tensorflow
VariableAggregation aggregation= VariableAggregation.None)
{
string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name;
- return with(ops.name_scope(null), scope =>
+ return tf_with(ops.name_scope(null), scope =>
{
if (dtype == TF_DataType.DtInvalid)
dtype = _dtype;
diff --git a/src/TensorFlowNET.Core/ops.py.cs b/src/TensorFlowNET.Core/ops.py.cs
index 8f7fce29..979e132e 100644
--- a/src/TensorFlowNET.Core/ops.py.cs
+++ b/src/TensorFlowNET.Core/ops.py.cs
@@ -295,7 +295,7 @@ namespace Tensorflow
// inner_device_stack = default_graph._device_function_stack
// var outer_context = default_graph.as_default;
- with(ops.control_dependencies(null), delegate
+ tf_with(ops.control_dependencies(null), delegate
{
var outer_graph = get_default_graph();
// outer_device_stack = None
diff --git a/tensorflowlib/README.md b/tensorflowlib/README.md
index 63cba815..318e5dc9 100644
--- a/tensorflowlib/README.md
+++ b/tensorflowlib/README.md
@@ -16,6 +16,8 @@ Here are some pre-built TensorFlow binaries you can use for each platform:
- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip
- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip
+
+
### Run in Linux
`Install-Package TensorFlow.NET`
@@ -31,10 +33,21 @@ sudo apt install libgdiplus
More information about [System.Drawing on Linux]().
+
+
### Run in Mac OS
-### GPU Tensorflow for windows
-Before running verify you installed CUDA and cuDNN
+
+
+### TensorFlow GPU for Windows
+
+Before running, verify that CUDA and cuDNN are installed (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4) and that your installed CUDA version matches.
+
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+```
+
+
### Build from source for Windows
diff --git a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs
index 44e5c7a7..38c650a3 100644
--- a/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs
+++ b/test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs
@@ -77,7 +77,7 @@ let run()=
let init = tf.global_variables_initializer()
- Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) ->
+ Tensorflow.Python.``tf_with``(tf.Session(), fun (sess:Session) ->
sess.run(init) |> ignore
// Loop over epochs
for epoch in [0..training_epochs] do
diff --git a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs
index c0ca95b3..7bacb28d 100644
--- a/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs
@@ -18,7 +18,7 @@ using NumSharp;
using System;
using System.Diagnostics;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
namespace TensorFlowNET.Examples
@@ -39,7 +39,7 @@ namespace TensorFlowNET.Examples
public int? test_size = null;
public int batch_size = 1024; // The number of samples per batch
- Datasets mnist;
+ Datasets mnist;
NDArray full_data_x;
int num_steps = 20; // Total steps to train
int k = 25; // The number of clusters
@@ -52,29 +52,41 @@ namespace TensorFlowNET.Examples
{
PrepareData();
var graph = ImportGraph();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
Train(sess);
- });
+ }
return accuray_test > 0.70;
}
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size:validation_size, test_size:test_size);
- full_data_x = mnist.train.data;
+ var loader = new MnistModelLoader();
+
+ var setting = new ModelLoadSetting
+ {
+ TrainDir = ".resources/mnist",
+ OneHot = true,
+ TrainSize = train_size,
+ ValidationSize = validation_size,
+ TestSize = test_size
+ };
+
+ mnist = loader.LoadAsync(setting).Result;
+
+ full_data_x = mnist.Train.Data;
// download graph meta data
string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta";
- Web.Download(url, "graph", "kmeans.meta");
+ loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait();
}
public Graph ImportGraph()
{
var graph = tf.Graph().as_default();
- tf.train.import_meta_graph("graph/kmeans.meta");
+ tf.train.import_meta_graph(".resources/graph/kmeans.meta");
return graph;
}
@@ -132,7 +144,7 @@ namespace TensorFlowNET.Examples
sw.Start();
foreach (var i in range(idx.Length))
{
- var x = mnist.train.labels[i];
+ var x = mnist.Train.Labels[i];
counts[idx[i]] += x;
}
@@ -153,7 +165,7 @@ namespace TensorFlowNET.Examples
var accuracy_op = tf.reduce_mean(cast);
// Test Model
- var (test_x, test_y) = (mnist.test.data, mnist.test.labels);
+ var (test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels);
result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y));
accuray_test = result;
print($"Test Accuracy: {accuray_test}");
diff --git a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs
index f7baef1d..0098404d 100644
--- a/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs
@@ -71,7 +71,7 @@ namespace TensorFlowNET.Examples
var init = tf.global_variables_initializer();
// Start training
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -114,7 +114,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Absolute mean square loss difference: {diff}");
return diff < 0.01;
- });
+ }
}
public void PrepareData()
diff --git a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs
index 185dd1fe..ca691d40 100644
--- a/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs
@@ -19,7 +19,7 @@ using System;
using System.Diagnostics;
using System.IO;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
namespace TensorFlowNET.Examples
@@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples
private float learning_rate = 0.01f;
private int display_step = 1;
- Datasets mnist;
+ Datasets mnist;
public bool Run()
{
@@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples
var sw = new Stopwatch();
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -84,11 +84,11 @@ namespace TensorFlowNET.Examples
sw.Start();
var avg_cost = 0.0f;
- var total_batch = mnist.train.num_examples / batch_size;
+ var total_batch = mnist.Train.NumOfExamples / batch_size;
// Loop over all batches
foreach (var i in range(total_batch))
{
- var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size);
+ var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(batch_size);
// Run optimization op (backprop) and cost op (to get loss value)
var result = sess.run(new object[] { optimizer, cost },
new FeedItem(x, batch_xs),
@@ -115,32 +115,32 @@ namespace TensorFlowNET.Examples
var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
// Calculate accuracy
var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
- float acc = accuracy.eval(new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels));
+ float acc = accuracy.eval(new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels));
print($"Accuracy: {acc.ToString("F4")}");
return acc > 0.9;
- });
+ }
}
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size: validation_size, test_size: test_size);
+ mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size).Result;
}
public void SaveModel(Session sess)
{
var saver = tf.train.Saver();
- var save_path = saver.save(sess, "logistic_regression/model.ckpt");
- tf.train.write_graph(sess.graph, "logistic_regression", "model.pbtxt", as_text: true);
+ var save_path = saver.save(sess, ".resources/logistic_regression/model.ckpt");
+ tf.train.write_graph(sess.graph, ".resources/logistic_regression", "model.pbtxt", as_text: true);
- FreezeGraph.freeze_graph(input_graph: "logistic_regression/model.pbtxt",
+ FreezeGraph.freeze_graph(input_graph: ".resources/logistic_regression/model.pbtxt",
input_saver: "",
input_binary: false,
- input_checkpoint: "logistic_regression/model.ckpt",
+ input_checkpoint: ".resources/logistic_regression/model.ckpt",
output_node_names: "Softmax",
restore_op_name: "save/restore_all",
filename_tensor_name: "save/Const:0",
- output_graph: "logistic_regression/model.pb",
+ output_graph: ".resources/logistic_regression/model.pb",
clear_devices: true,
initializer_nodes: "");
}
@@ -148,7 +148,7 @@ namespace TensorFlowNET.Examples
public void Predict(Session sess)
{
var graph = new Graph().as_default();
- graph.Import(Path.Join("logistic_regression", "model.pb"));
+ graph.Import(Path.Join(".resources/logistic_regression", "model.pb"));
// restoring the model
// var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta");
@@ -159,7 +159,7 @@ namespace TensorFlowNET.Examples
var input = x.outputs[0];
// predict
- var (batch_xs, batch_ys) = mnist.train.next_batch(10);
+ var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(10);
var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)]));
if (results.argmax() == (batch_ys[0] as NDArray).argmax())
diff --git a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs
index d77c6902..358a3301 100644
--- a/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs
@@ -48,14 +48,14 @@ namespace TensorFlowNET.Examples
float y_max = X.amax(0).Data(1) + 0.5f;
var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30));
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
//var samples = np.vstack(xx.ravel(), yy.ravel());
//samples = np.transpose(samples);
var array = np.Load(Path.Join("nb", "nb_example.npy"));
var samples = np.array(array).astype(np.float32);
var Z = sess.run(predict(samples));
- });
+ }
return true;
}
diff --git a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs
index 86ecd281..8f761d00 100644
--- a/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs
@@ -17,7 +17,7 @@
using NumSharp;
using System;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
namespace TensorFlowNET.Examples
@@ -31,7 +31,7 @@ namespace TensorFlowNET.Examples
{
public bool Enabled { get; set; } = true;
public string Name => "Nearest Neighbor";
- Datasets mnist;
+ Datasets mnist;
NDArray Xtr, Ytr, Xte, Yte;
public int? TrainSize = null;
public int ValidationSize = 5000;
@@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples
float accuracy = 0f;
// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -77,17 +77,17 @@ namespace TensorFlowNET.Examples
}
print($"Accuracy: {accuracy}");
- });
+ }
return accuracy > 0.8;
}
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: TrainSize, validation_size:ValidationSize, test_size:TestSize);
+ mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize).Result;
// In this example, we limit mnist data
- (Xtr, Ytr) = mnist.train.next_batch(TrainSize==null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates)
- (Xte, Yte) = mnist.test.next_batch(TestSize==null ? 200 : TestSize.Value / 100); // 200 for testing
+ (Xtr, Ytr) = mnist.Train.GetNextBatch(TrainSize == null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates)
+ (Xte, Yte) = mnist.Test.GetNextBatch(TestSize == null ? 200 : TestSize.Value / 100); // 200 for testing
}
public Graph ImportGraph()
diff --git a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs
index a4a2901c..12687e3f 100644
--- a/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs
+++ b/test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs
@@ -90,7 +90,7 @@ namespace TensorFlowNET.Examples
var init = tf.global_variables_initializer();
float loss_value = 0;
// Start tf session
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -110,7 +110,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
@@ -128,7 +128,7 @@ namespace TensorFlowNET.Examples
float loss_value = 0;
// Start tf session
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -143,7 +143,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
diff --git a/test/TensorFlowNET.Examples/BasicOperations.cs b/test/TensorFlowNET.Examples/BasicOperations.cs
index 5fd52e2d..c7314abe 100644
--- a/test/TensorFlowNET.Examples/BasicOperations.cs
+++ b/test/TensorFlowNET.Examples/BasicOperations.cs
@@ -134,7 +134,7 @@ namespace TensorFlowNET.Examples
3, 3, 2));
var batchMul = tf.batch_matmul(firstTensor, secondTensor);
var checkTensor = np.array(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0);
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
var result = sess.run(batchMul);
Console.WriteLine(result.ToString());
@@ -152,7 +152,7 @@ namespace TensorFlowNET.Examples
// [24, 0]]])
return np.reshape(result, 18)
.array_equal(checkTensor);
- });
+ }
}
public void PrepareData()
diff --git a/test/TensorFlowNET.Examples/HelloWorld.cs b/test/TensorFlowNET.Examples/HelloWorld.cs
index e9c91336..52e47e3d 100644
--- a/test/TensorFlowNET.Examples/HelloWorld.cs
+++ b/test/TensorFlowNET.Examples/HelloWorld.cs
@@ -25,13 +25,13 @@ namespace TensorFlowNET.Examples
var hello = tf.constant(str);
// Start tf session
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the op
var result = sess.run(hello);
Console.WriteLine(result.ToString());
return result.ToString().Equals(str);
- });
+ }
}
public void PrepareData()
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs
new file mode 100644
index 00000000..a77a5b00
--- /dev/null
+++ b/test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs
@@ -0,0 +1,74 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow;
+using TensorFlowDatasets;
+using static Tensorflow.Python;
+
+namespace TensorFlowNET.Examples
+{
+ ///
+ /// https://www.tensorflow.org/tutorials/images/deep_cnn
+ ///
+ public class CIFAR10_CNN : IExample
+ {
+ public bool Enabled { get; set; } = true;
+ public bool IsImportingGraph { get; set; } = false;
+
+ public string Name => "CIFAR-10 CNN";
+
+ public bool Run()
+ {
+ PrepareData();
+
+ return true;
+ }
+
+ public Graph BuildGraph()
+ {
+ throw new NotImplementedException();
+ }
+
+ public Graph ImportGraph()
+ {
+ throw new NotImplementedException();
+ }
+
+ public void Predict(Session sess)
+ {
+ throw new NotImplementedException();
+ }
+
+ public void PrepareData()
+ {
+ var tfds = new DatasetBuilder();
+ tfds.download_and_prepare();
+ }
+
+ public void Test(Session sess)
+ {
+ throw new NotImplementedException();
+ }
+
+ public void Train(Session sess)
+ {
+ throw new NotImplementedException();
+ }
+ }
+}
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
index 2dc355c4..4b882a1a 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
@@ -16,11 +16,12 @@
using NumSharp;
using System;
+using System.Diagnostics;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
-namespace TensorFlowNET.Examples.ImageProcess
+namespace TensorFlowNET.Examples
{
///
/// Convolutional Neural Network classifier for Hand Written Digits
@@ -45,7 +46,7 @@ namespace TensorFlowNET.Examples.ImageProcess
int epochs = 5; // accuracy > 98%
int batch_size = 100;
float learning_rate = 0.001f;
- Datasets mnist;
+ Datasets mnist;
// Network configuration
// 1st Convolutional Layer
@@ -78,11 +79,11 @@ namespace TensorFlowNET.Examples.ImageProcess
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.05 && accuracy_test > 0.98;
}
@@ -91,7 +92,7 @@ namespace TensorFlowNET.Examples.ImageProcess
{
var graph = new Graph().as_default();
- with(tf.name_scope("Input"), delegate
+ tf_with(tf.name_scope("Input"), delegate
{
// Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X");
@@ -106,25 +107,25 @@ namespace TensorFlowNET.Examples.ImageProcess
var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true);
var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);
- with(tf.variable_scope("Train"), delegate
+ tf_with(tf.variable_scope("Train"), delegate
{
- with(tf.variable_scope("Loss"), delegate
+ tf_with(tf.variable_scope("Loss"), delegate
{
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss");
});
- with(tf.variable_scope("Optimizer"), delegate
+ tf_with(tf.variable_scope("Optimizer"), delegate
{
optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
});
- with(tf.variable_scope("Accuracy"), delegate
+ tf_with(tf.variable_scope("Accuracy"), delegate
{
var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");
});
- with(tf.variable_scope("Prediction"), delegate
+ tf_with(tf.variable_scope("Prediction"), delegate
{
cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions");
});
@@ -144,6 +145,8 @@ namespace TensorFlowNET.Examples.ImageProcess
float loss_val = 100.0f;
float accuracy_val = 0f;
+ var sw = new Stopwatch();
+ sw.Start();
foreach (var epoch in range(epochs))
{
print($"Training epoch: {epoch + 1}");
@@ -165,7 +168,8 @@ namespace TensorFlowNET.Examples.ImageProcess
var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
loss_val = result[0];
accuracy_val = result[1];
- print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
+ print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms");
+ sw.Restart();
}
}
@@ -200,7 +204,7 @@ namespace TensorFlowNET.Examples.ImageProcess
/// The output array
private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name)
{
- return with(tf.variable_scope(name), delegate {
+ return tf_with(tf.variable_scope(name), delegate {
var num_in_channel = x.shape[x.NDims - 1];
var shape = new[] { filter_size, filter_size, num_in_channel, num_filters };
@@ -240,7 +244,7 @@ namespace TensorFlowNET.Examples.ImageProcess
/// flattened array
private Tensor flatten_layer(Tensor layer)
{
- return with(tf.variable_scope("Flatten_layer"), delegate
+ return tf_with(tf.variable_scope("Flatten_layer"), delegate
{
var layer_shape = layer.TensorShape;
var num_features = layer_shape[new Slice(1, 4)].Size;
@@ -289,7 +293,7 @@ namespace TensorFlowNET.Examples.ImageProcess
/// The output array
private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
{
- return with(tf.variable_scope(name), delegate
+ return tf_with(tf.variable_scope(name), delegate
{
var in_dim = x.shape[1];
@@ -306,14 +310,14 @@ namespace TensorFlowNET.Examples.ImageProcess
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true);
- (x_train, y_train) = Reformat(mnist.train.data, mnist.train.labels);
- (x_valid, y_valid) = Reformat(mnist.validation.data, mnist.validation.labels);
- (x_test, y_test) = Reformat(mnist.test.data, mnist.test.labels);
+ mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result;
+ (x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels);
+ (x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels);
+ (x_test, y_test) = Reformat(mnist.Test.Data, mnist.Test.Labels);
print("Size of:");
- print($"- Training-set:\t\t{len(mnist.train.data)}");
- print($"- Validation-set:\t{len(mnist.validation.data)}");
+ print($"- Training-set:\t\t{len(mnist.Train.Data)}");
+ print($"- Validation-set:\t{len(mnist.Validation.Data)}");
}
///
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
index 09fdc818..02feecb9 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
@@ -17,10 +17,10 @@
using NumSharp;
using System;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
-namespace TensorFlowNET.Examples.ImageProcess
+namespace TensorFlowNET.Examples
{
///
/// Neural Network classifier for Hand Written Digits
@@ -44,7 +44,7 @@ namespace TensorFlowNET.Examples.ImageProcess
int batch_size = 100;
float learning_rate = 0.001f;
int h1 = 200; // number of nodes in the 1st hidden layer
- Datasets mnist;
+ Datasets mnist;
Tensor x, y;
Tensor loss, accuracy;
@@ -59,11 +59,11 @@ namespace TensorFlowNET.Examples.ImageProcess
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.09 && accuracy_test > 0.95;
}
@@ -121,13 +121,13 @@ namespace TensorFlowNET.Examples.ImageProcess
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true);
+ mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result;
}
public void Train(Session sess)
{
// Number of training iterations in each epoch
- var num_tr_iter = mnist.train.labels.len / batch_size;
+ var num_tr_iter = mnist.Train.Labels.len / batch_size;
var init = tf.global_variables_initializer();
sess.run(init);
@@ -139,13 +139,13 @@ namespace TensorFlowNET.Examples.ImageProcess
{
print($"Training epoch: {epoch + 1}");
// Randomly shuffle the training data at the beginning of each epoch
- var (x_train, y_train) = randomize(mnist.train.data, mnist.train.labels);
+ var (x_train, y_train) = mnist.Randomize(mnist.Train.Data, mnist.Train.Labels);
foreach (var iteration in range(num_tr_iter))
{
var start = iteration * batch_size;
var end = (iteration + 1) * batch_size;
- var (x_batch, y_batch) = get_next_batch(x_train, y_train, start, end);
+ var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);
// Run optimization op (backprop)
sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
@@ -161,7 +161,8 @@ namespace TensorFlowNET.Examples.ImageProcess
}
// Run validation after every epoch
- var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.data), new FeedItem(y, mnist.validation.labels));
+ var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Validation.Data), new FeedItem(y, mnist.Validation.Labels));
+
loss_val = results1[0];
accuracy_val = results1[1];
print("---------------------------------------------------------");
@@ -172,35 +173,12 @@ namespace TensorFlowNET.Examples.ImageProcess
public void Test(Session sess)
{
- var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels));
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels));
loss_test = result[0];
accuracy_test = result[1];
print("---------------------------------------------------------");
print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
print("---------------------------------------------------------");
}
-
- private (NDArray, NDArray) randomize(NDArray x, NDArray y)
- {
- var perm = np.random.permutation(y.shape[0]);
-
- np.random.shuffle(perm);
- return (mnist.train.data[perm], mnist.train.labels[perm]);
- }
-
- ///
- /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method)
- ///
- ///
- ///
- ///
- ///
- ///
- private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end)
- {
- var x_batch = x[$"{start}:{end}"];
- var y_batch = y[$"{start}:{end}"];
- return (x_batch, y_batch);
- }
}
}
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs
index d51ca9ad..b91a19ca 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs
@@ -17,10 +17,10 @@
using NumSharp;
using System;
using Tensorflow;
-using TensorFlowNET.Examples.Utility;
+using Tensorflow.Hub;
using static Tensorflow.Python;
-namespace TensorFlowNET.Examples.ImageProcess
+namespace TensorFlowNET.Examples
{
///
/// Recurrent Neural Network for handwritten digits MNIST.
@@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples.ImageProcess
int n_inputs = 28;
int n_outputs = 10;
- Datasets mnist;
+ Datasets mnist;
Tensor x, y;
Tensor loss, accuracy, cls_prediction;
@@ -64,11 +64,11 @@ namespace TensorFlowNET.Examples.ImageProcess
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.09 && accuracy_test > 0.95;
}
@@ -143,15 +143,15 @@ namespace TensorFlowNET.Examples.ImageProcess
public void PrepareData()
{
- mnist = MNIST.read_data_sets("mnist", one_hot: true);
- (x_train, y_train) = (mnist.train.data, mnist.train.labels);
- (x_valid, y_valid) = (mnist.validation.data, mnist.validation.labels);
- (x_test, y_test) = (mnist.test.data, mnist.test.labels);
+ mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result;
+ (x_train, y_train) = (mnist.Train.Data, mnist.Train.Labels);
+ (x_valid, y_valid) = (mnist.Validation.Data, mnist.Validation.Labels);
+ (x_test, y_test) = (mnist.Test.Data, mnist.Test.Labels);
print("Size of:");
- print($"- Training-set:\t\t{len(mnist.train.data)}");
- print($"- Validation-set:\t{len(mnist.validation.data)}");
- print($"- Test-set:\t\t{len(mnist.test.data)}");
+ print($"- Training-set:\t\t{len(mnist.Train.Data)}");
+ print($"- Validation-set:\t{len(mnist.Validation.Data)}");
+ print($"- Test-set:\t\t{len(mnist.Test.Data)}");
}
public Graph ImportGraph() => throw new NotImplementedException();
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
index c43c853a..8eed577b 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
@@ -4,7 +4,7 @@ using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Python;
-namespace TensorFlowNET.Examples.ImageProcess
+namespace TensorFlowNET.Examples
{
///
/// This example removes the background from an input image.
@@ -32,11 +32,11 @@ namespace TensorFlowNET.Examples.ImageProcess
Tensor output = graph.OperationByName("SemanticPredictions");
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
// Runs inference on a single image.
sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]"));
- });
+ }
return false;
}
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
index a0f61029..efcb0b73 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
@@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples
var result_labels = new List();
var sw = new Stopwatch();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
foreach (var nd in file_ndarrays)
{
@@ -58,7 +58,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan);
result_labels.Add(labels[idx]);
}
- });
+ }
return result_labels.Contains("military uniform");
}
@@ -69,19 +69,19 @@ namespace TensorFlowNET.Examples
int input_mean = 117,
int input_std = 1)
{
- return with(tf.Graph().as_default(), graph =>
- {
- var file_reader = tf.read_file(file_name, "file_reader");
- var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
- var cast = tf.cast(decodeJpeg, tf.float32);
- var dims_expander = tf.expand_dims(cast, 0);
- var resize = tf.constant(new int[] { input_height, input_width });
- var bilinear = tf.image.resize_bilinear(dims_expander, resize);
- var sub = tf.subtract(bilinear, new float[] { input_mean });
- var normalized = tf.divide(sub, new float[] { input_std });
-
- return with(tf.Session(graph), sess => sess.run(normalized));
- });
+ var graph = tf.Graph().as_default();
+
+ var file_reader = tf.read_file(file_name, "file_reader");
+ var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
+ var cast = tf.cast(decodeJpeg, tf.float32);
+ var dims_expander = tf.expand_dims(cast, 0);
+ var resize = tf.constant(new int[] { input_height, input_width });
+ var bilinear = tf.image.resize_bilinear(dims_expander, resize);
+ var sub = tf.subtract(bilinear, new float[] { input_mean });
+ var normalized = tf.divide(sub, new float[] { input_std });
+
+ using (var sess = tf.Session(graph))
+ return sess.run(normalized);
}
public void PrepareData()
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
index 2f6b9ab1..f51833d2 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
@@ -45,9 +45,12 @@ namespace TensorFlowNET.Examples
var input_operation = graph.get_operation_by_name(input_name);
var output_operation = graph.get_operation_by_name(output_name);
- var results = with(tf.Session(graph),
- sess => sess.run(output_operation.outputs[0],
- new FeedItem(input_operation.outputs[0], nd)));
+ NDArray results;
+ using (var sess = tf.Session(graph))
+ {
+ results = sess.run(output_operation.outputs[0],
+ new FeedItem(input_operation.outputs[0], nd));
+ }
results = np.squeeze(results);
@@ -69,19 +72,19 @@ namespace TensorFlowNET.Examples
int input_mean = 0,
int input_std = 255)
{
- return with(tf.Graph().as_default(), graph =>
- {
- var file_reader = tf.read_file(file_name, "file_reader");
- var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
- var caster = tf.cast(image_reader, tf.float32);
- var dims_expander = tf.expand_dims(caster, 0);
- var resize = tf.constant(new int[] { input_height, input_width });
- var bilinear = tf.image.resize_bilinear(dims_expander, resize);
- var sub = tf.subtract(bilinear, new float[] { input_mean });
- var normalized = tf.divide(sub, new float[] { input_std });
-
- return with(tf.Session(graph), sess => sess.run(normalized));
- });
+ var graph = tf.Graph().as_default();
+
+ var file_reader = tf.read_file(file_name, "file_reader");
+ var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
+ var caster = tf.cast(image_reader, tf.float32);
+ var dims_expander = tf.expand_dims(caster, 0);
+ var resize = tf.constant(new int[] { input_height, input_width });
+ var bilinear = tf.image.resize_bilinear(dims_expander, resize);
+ var sub = tf.subtract(bilinear, new float[] { input_mean });
+ var normalized = tf.divide(sub, new float[] { input_std });
+
+ using (var sess = tf.Session(graph))
+ return sess.run(normalized);
}
public void PrepareData()
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
index f40be91f..8f8d0dd9 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
@@ -51,7 +51,8 @@ namespace TensorFlowNET.Examples
var graph = IsImportingGraph ? ImportGraph() : BuildGraph();
- with(tf.Session(graph), sess => Predict(sess));
+ using (var sess = tf.Session(graph))
+ Predict(sess);
return true;
}
@@ -101,14 +102,15 @@ namespace TensorFlowNET.Examples
private NDArray ReadTensorFromImageFile(string file_name)
{
- return with(tf.Graph().as_default(), graph =>
- {
- var file_reader = tf.read_file(file_name, "file_reader");
- var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
- var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8);
- var dims_expander = tf.expand_dims(casted, 0);
- return with(tf.Session(graph), sess => sess.run(dims_expander));
- });
+ var graph = tf.Graph().as_default();
+
+ var file_reader = tf.read_file(file_name, "file_reader");
+ var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
+ var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8);
+ var dims_expander = tf.expand_dims(casted, 0);
+
+ using (var sess = tf.Session(graph))
+ return sess.run(dims_expander);
}
private void buildOutputImage(NDArray[] resultArr)
diff --git a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
index 4d3a858f..becd9f7e 100644
--- a/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
@@ -25,7 +25,7 @@ using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Python;
-namespace TensorFlowNET.Examples.ImageProcess
+namespace TensorFlowNET.Examples
{
///
/// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet
@@ -83,19 +83,19 @@ namespace TensorFlowNET.Examples.ImageProcess
#region For debug purpose
// predict images
- Predict(null);
+ // Predict(null);
// load saved pb and test new images.
- Test(null);
+ // Test(null);
#endregion
var graph = IsImportingGraph ? ImportGraph() : BuildGraph();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
Train(sess);
- });
+ }
return test_accuracy > 0.75f;
}
@@ -141,20 +141,18 @@ namespace TensorFlowNET.Examples.ImageProcess
Tensor evaluation_step = null;
Tensor prediction = null;
- with(eval_graph.as_default(), graph =>
- {
- // Add the new layer for exporting.
- var (_, _, bottleneck_input, ground_truth_input, final_tensor) =
- add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
- wants_quantization, is_training: false);
+ var graph = eval_graph.as_default();
+ // Add the new layer for exporting.
+ var (_, _, bottleneck_input, ground_truth_input, final_tensor) =
+ add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
+ wants_quantization, is_training: false);
- // Now we need to restore the values from the training graph to the eval
- // graph.
- tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME);
+ // Now we need to restore the values from the training graph to the eval
+ // graph.
+ tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME);
- (evaluation_step, prediction) = add_evaluation_step(final_tensor,
- ground_truth_input);
- });
+ (evaluation_step, prediction) = add_evaluation_step(final_tensor,
+ ground_truth_input);
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,
evaluation_step, prediction);
@@ -180,7 +178,7 @@ namespace TensorFlowNET.Examples.ImageProcess
Tensor bottleneck_tensor, bool quantize_layer, bool is_training)
{
var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.Dimensions[0], bottleneck_tensor.TensorShape.Dimensions[1]);
- with(tf.name_scope("input"), scope =>
+ tf_with(tf.name_scope("input"), scope =>
{
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
@@ -193,10 +191,10 @@ namespace TensorFlowNET.Examples.ImageProcess
// Organizing the following ops so they are easier to see in TensorBoard.
string layer_name = "final_retrain_ops";
Tensor logits = null;
- with(tf.name_scope(layer_name), scope =>
+ tf_with(tf.name_scope(layer_name), scope =>
{
RefVariable layer_weights = null;
- with(tf.name_scope("weights"), delegate
+ tf_with(tf.name_scope("weights"), delegate
{
var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f);
layer_weights = tf.Variable(initial_value, name: "final_weights");
@@ -204,13 +202,13 @@ namespace TensorFlowNET.Examples.ImageProcess
});
RefVariable layer_biases = null;
- with(tf.name_scope("biases"), delegate
+ tf_with(tf.name_scope("biases"), delegate
{
layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases");
variable_summaries(layer_biases);
});
- with(tf.name_scope("Wx_plus_b"), delegate
+ tf_with(tf.name_scope("Wx_plus_b"), delegate
{
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases;
tf.summary.histogram("pre_activations", logits);
@@ -239,7 +237,7 @@ namespace TensorFlowNET.Examples.ImageProcess
return (null, null, bottleneck_input, ground_truth_input, final_tensor);
Tensor cross_entropy_mean = null;
- with(tf.name_scope("cross_entropy"), delegate
+ tf_with(tf.name_scope("cross_entropy"), delegate
{
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels: ground_truth_input, logits: logits);
@@ -247,7 +245,7 @@ namespace TensorFlowNET.Examples.ImageProcess
tf.summary.scalar("cross_entropy", cross_entropy_mean);
- with(tf.name_scope("train"), delegate
+ tf_with(tf.name_scope("train"), delegate
{
var optimizer = tf.train.GradientDescentOptimizer(learning_rate);
train_step = optimizer.minimize(cross_entropy_mean);
@@ -259,12 +257,12 @@ namespace TensorFlowNET.Examples.ImageProcess
private void variable_summaries(RefVariable var)
{
- with(tf.name_scope("summaries"), delegate
+ tf_with(tf.name_scope("summaries"), delegate
{
var mean = tf.reduce_mean(var);
tf.summary.scalar("mean", mean);
Tensor stddev = null;
- with(tf.name_scope("stddev"), delegate
+ tf_with(tf.name_scope("stddev"), delegate
{
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)));
});
@@ -279,7 +277,7 @@ namespace TensorFlowNET.Examples.ImageProcess
{
var (height, width) = (299, 299);
- return with(tf.Graph().as_default(), graph =>
+ return tf_with(tf.Graph().as_default(), graph =>
{
tf.train.import_meta_graph("graph/InceptionV3.meta");
Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3));
@@ -350,15 +348,15 @@ namespace TensorFlowNET.Examples.ImageProcess
{
Tensor evaluation_step = null, correct_prediction = null, prediction = null;
- with(tf.name_scope("accuracy"), scope =>
+ tf_with(tf.name_scope("accuracy"), scope =>
{
- with(tf.name_scope("correct_prediction"), delegate
+ tf_with(tf.name_scope("correct_prediction"), delegate
{
prediction = tf.argmax(result_tensor, 1);
correct_prediction = tf.equal(prediction, ground_truth_tensor);
});
- with(tf.name_scope("accuracy"), delegate
+ tf_with(tf.name_scope("accuracy"), delegate
{
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
});
@@ -596,7 +594,7 @@ namespace TensorFlowNET.Examples.ImageProcess
create_module_graph();
// Add the new layer that we'll be training.
- with(graph.as_default(), delegate
+ tf_with(graph.as_default(), delegate
{
(train_step, cross_entropy, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
@@ -745,13 +743,13 @@ namespace TensorFlowNET.Examples.ImageProcess
Tensor input = graph.OperationByName("Placeholder");
Tensor output = graph.OperationByName("final_result");
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
var result = sess.run(output, new FeedItem(input, fileBytes));
var prob = np.squeeze(result);
var idx = np.argmax(prob);
print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}.");
- });
+ }
}
private NDArray ReadTensorFromImageFile(string file_name,
@@ -760,19 +758,19 @@ namespace TensorFlowNET.Examples.ImageProcess
int input_mean = 0,
int input_std = 255)
{
- return with(tf.Graph().as_default(), graph =>
- {
- var file_reader = tf.read_file(file_name, "file_reader");
- var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
- var caster = tf.cast(image_reader, tf.float32);
- var dims_expander = tf.expand_dims(caster, 0);
- var resize = tf.constant(new int[] { input_height, input_width });
- var bilinear = tf.image.resize_bilinear(dims_expander, resize);
- var sub = tf.subtract(bilinear, new float[] { input_mean });
- var normalized = tf.divide(sub, new float[] { input_std });
-
- return with(tf.Session(graph), sess => sess.run(normalized));
- });
+ var graph = tf.Graph().as_default();
+
+ var file_reader = tf.read_file(file_name, "file_reader");
+ var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
+ var caster = tf.cast(image_reader, tf.float32);
+ var dims_expander = tf.expand_dims(caster, 0);
+ var resize = tf.constant(new int[] { input_height, input_width });
+ var bilinear = tf.image.resize_bilinear(dims_expander, resize);
+ var sub = tf.subtract(bilinear, new float[] { input_mean });
+ var normalized = tf.divide(sub, new float[] { input_std });
+
+ using (var sess = tf.Session(graph))
+ return sess.run(normalized);
}
public void Test(Session sess_)
@@ -783,7 +781,7 @@ namespace TensorFlowNET.Examples.ImageProcess
var graph = Graph.ImportFromPB(output_graph);
var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding();
- with(tf.Session(graph), sess =>
+ tf_with(tf.Session(graph), sess =>
{
(test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
diff --git a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj
new file mode 100644
index 00000000..1bd3d530
--- /dev/null
+++ b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.GPU.csproj
@@ -0,0 +1,24 @@
+
+
+
+ Exe
+ netcoreapp2.2
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
index 149bd549..f4e2340a 100644
--- a/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
+++ b/test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
@@ -16,7 +16,9 @@
+
+
diff --git a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
index a2afe43d..9ec17f12 100644
--- a/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
+++ b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
@@ -64,7 +64,9 @@ namespace TensorFlowNET.Examples
{
PrepareData();
var graph = IsImportingGraph ? ImportGraph() : BuildGraph();
- with(tf.Session(graph), sess => Train(sess));
+
+ using (var sess = tf.Session(graph))
+ Train(sess);
return max_accuracy > 0.9;
}
diff --git a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
index 2ec703ba..7e324c56 100644
--- a/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
+++ b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
@@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples.Text.NER
var init = tf.global_variables_initializer();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
sess.run(init);
@@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples.Text.NER
loss_value = run_epoch(sess, train, dev, epoch);
print($"train loss: {loss_value}");
}
- });
+ }
return loss_value < 0.1;
}
diff --git a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
index ce6628e3..8ed87748 100644
--- a/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
+++ b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
@@ -66,7 +66,7 @@ namespace TensorFlowNET.Examples
// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
// Run the initializer
sess.run(init);
@@ -112,7 +112,7 @@ namespace TensorFlowNET.Examples
}
}
}
- });
+ }
return average_loss < 100;
}
diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
index 390c6040..75308b8c 100644
--- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
+++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
@@ -26,7 +26,7 @@ namespace TensorFlowNET.Examples.Text
Tensor conv3 = null, conv4 = null, conv5 = null, conv6 = null;
Tensor h_pool = null;
- with(tf.name_scope("conv-maxpool-1"), delegate
+ tf_with(tf.name_scope("conv-maxpool-1"), delegate
{
var conv1 = tf.layers.conv2d(x_expanded,
filters: num_filters,
@@ -40,7 +40,7 @@ namespace TensorFlowNET.Examples.Text
pool1 = tf.transpose(pool1, new[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-maxpool-2"), delegate
+ tf_with(tf.name_scope("conv-maxpool-2"), delegate
{
var conv2 = tf.layers.conv2d(pool1,
filters: num_filters,
@@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples.Text
pool2 = tf.transpose(pool2, new[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-3"), delegate
+ tf_with(tf.name_scope("conv-3"), delegate
{
conv3 = tf.layers.conv2d(pool2,
filters: num_filters,
@@ -64,7 +64,7 @@ namespace TensorFlowNET.Examples.Text
conv3 = tf.transpose(conv3, new[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-4"), delegate
+ tf_with(tf.name_scope("conv-4"), delegate
{
conv4 = tf.layers.conv2d(conv3,
filters: num_filters,
@@ -74,7 +74,7 @@ namespace TensorFlowNET.Examples.Text
conv4 = tf.transpose(conv4, new[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-5"), delegate
+ tf_with(tf.name_scope("conv-5"), delegate
{
conv5 = tf.layers.conv2d(conv4,
filters: num_filters,
@@ -84,7 +84,7 @@ namespace TensorFlowNET.Examples.Text
conv5 = tf.transpose(conv5, new[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-maxpool-6"), delegate
+ tf_with(tf.name_scope("conv-maxpool-6"), delegate
{
conv6 = tf.layers.conv2d(conv5,
filters: num_filters,
@@ -105,7 +105,7 @@ namespace TensorFlowNET.Examples.Text
Tensor logits = null;
Tensor predictions = null;
- with(tf.name_scope("fc-1"), delegate
+ tf_with(tf.name_scope("fc-1"), delegate
{
fc1_out = tf.layers.dense(h_pool,
1024,
@@ -113,7 +113,7 @@ namespace TensorFlowNET.Examples.Text
kernel_initializer: kernel_initializer);
});
- with(tf.name_scope("fc-2"), delegate
+ tf_with(tf.name_scope("fc-2"), delegate
{
fc2_out = tf.layers.dense(fc1_out,
1024,
@@ -121,7 +121,7 @@ namespace TensorFlowNET.Examples.Text
kernel_initializer: kernel_initializer);
});
- with(tf.name_scope("fc-3"), delegate
+ tf_with(tf.name_scope("fc-3"), delegate
{
logits = tf.layers.dense(fc2_out,
num_class,
@@ -129,7 +129,7 @@ namespace TensorFlowNET.Examples.Text
predictions = tf.argmax(logits, -1, output_type: tf.int32);
});
- with(tf.name_scope("loss"), delegate
+ tf_with(tf.name_scope("loss"), delegate
{
var y_one_hot = tf.one_hot(y, num_class);
var loss = tf.reduce_mean(
@@ -137,7 +137,7 @@ namespace TensorFlowNET.Examples.Text
var optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step: global_step);
});
- with(tf.name_scope("accuracy"), delegate
+ tf_with(tf.name_scope("accuracy"), delegate
{
var correct_predictions = tf.equal(predictions, y);
var accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name: "accuracy");
diff --git a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
index c71376f8..8087a2b2 100644
--- a/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
+++ b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
@@ -41,7 +41,7 @@ namespace TensorFlowNET.Examples.Text
global_step = tf.Variable(0, trainable: false);
// Embedding Layer
- with(tf.name_scope("embedding"), delegate
+ tf_with(tf.name_scope("embedding"), delegate
{
var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f);
embeddings = tf.get_variable("embeddings", initializer: init_embeddings);
@@ -59,7 +59,7 @@ namespace TensorFlowNET.Examples.Text
Tensor fc2_out = null;
// First Convolution Layer
- with(tf.variable_scope("conv-0"), delegate
+ tf_with(tf.variable_scope("conv-0"), delegate
{
conv0 = tf.layers.conv2d(x_expanded,
filters: num_filters[0],
@@ -70,25 +70,25 @@ namespace TensorFlowNET.Examples.Text
conv0 = tf.transpose(conv0, new int[] { 0, 1, 3, 2 });
});
- with(tf.name_scope("conv-block-1"), delegate {
+ tf_with(tf.name_scope("conv-block-1"), delegate {
conv1 = conv_block(conv0, 1);
});
- with(tf.name_scope("conv-block-2"), delegate {
+ tf_with(tf.name_scope("conv-block-2"), delegate {
conv2 = conv_block(conv1, 2);
});
- with(tf.name_scope("conv-block-3"), delegate {
+ tf_with(tf.name_scope("conv-block-3"), delegate {
conv3 = conv_block(conv2, 3);
});
- with(tf.name_scope("conv-block-4"), delegate
+ tf_with(tf.name_scope("conv-block-4"), delegate
{
conv4 = conv_block(conv3, 4, max_pool: false);
});
// ============= k-max Pooling =============
- with(tf.name_scope("k-max-pooling"), delegate
+ tf_with(tf.name_scope("k-max-pooling"), delegate
{
var h = tf.transpose(tf.squeeze(conv4, new int[] { -1 }), new int[] { 0, 2, 1 });
var top_k = tf.nn.top_k(h, k: 8, sorted: false)[0];
@@ -96,30 +96,30 @@ namespace TensorFlowNET.Examples.Text
});
// ============= Fully Connected Layers =============
- with(tf.name_scope("fc-1"), scope =>
+ tf_with(tf.name_scope("fc-1"), scope =>
{
fc1_out = tf.layers.dense(h_flat, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer);
});
- with(tf.name_scope("fc-2"), scope =>
+ tf_with(tf.name_scope("fc-2"), scope =>
{
fc2_out = tf.layers.dense(fc1_out, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer);
});
- with(tf.name_scope("fc-3"), scope =>
+ tf_with(tf.name_scope("fc-3"), scope =>
{
logits = tf.layers.dense(fc2_out, num_class, activation: null, kernel_initializer: fc_initializer);
predictions = tf.argmax(logits, -1, output_type: tf.int32);
});
// ============= Loss and Accuracy =============
- with(tf.name_scope("loss"), delegate
+ tf_with(tf.name_scope("loss"), delegate
{
var y_one_hot = tf.one_hot(y, num_class);
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot));
var update_ops = tf.get_collection(ops.GraphKeys.UPDATE_OPS) as List