diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 9efeddaa..f113418a 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -1,133 +1,133 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29102.190
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|Any CPU = Debug|Any CPU
- Debug|x64 = Debug|x64
- Debug-Minimal|Any CPU = Debug-Minimal|Any CPU
- Debug-Minimal|x64 = Debug-Minimal|x64
- Publish|Any CPU = Publish|Any CPU
- Publish|x64 = Publish|x64
- Release|Any CPU = Release|Any CPU
- Release|x64 = Release|x64
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU
- {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
- GlobalSection(ExtensibilityGlobals) = postSolution
- SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A}
- EndGlobalSection
-EndGlobal
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.29102.190
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|x64 = Debug|x64
+ Debug-Minimal|Any CPU = Debug-Minimal|Any CPU
+ Debug-Minimal|x64 = Debug-Minimal|x64
+ Publish|Any CPU = Publish|Any CPU
+ Publish|x64 = Publish|x64
+ Release|Any CPU = Release|Any CPU
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU
+ {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU
+ {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU
+ {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU
+ {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A}
+ EndGlobalSection
+EndGlobal
diff --git a/docs/assets/Logo.md b/docs/assets/Logo.md
index dad45dca..21e7858a 100644
--- a/docs/assets/Logo.md
+++ b/docs/assets/Logo.md
@@ -1,3 +1,3 @@
-TensorFlow.NET logo (c) 2019 by Meinrad Recheis.
-
+TensorFlow.NET logo (c) 2019 by Meinrad Recheis.
+
The logo is based on the original Tensorflow logo which is copyrighted by the respective creator.
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
index c6a5dee0..81c13827 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
@@ -1,17 +1,17 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using System.Collections.Generic;
@@ -77,8 +77,8 @@ namespace Tensorflow
///
/// Use with the `with` keyword to specify that all operations constructed
/// within the context should have control dependencies on
- /// `control_inputs`.
- ///
+ /// `control_inputs`.
+ ///
public _ControlDependenciesController control_dependencies(object[] control_inputs)
{
if (control_inputs == null)
@@ -92,20 +92,20 @@ namespace Tensorflow
// TODO: implement IndexedSlices
//case IndexedSlices islice:
// control_ops.Add(islice.op);
- // break;
+ // break;
case Tensor t:
control_ops.Add(t.op);
break;
case Operation op:
control_ops.Add(op);
- break;
+ break;
default:
var t1 = _as_graph_element(c);
if (t1 == null)
throw new TypeError($"Control input must be Operation or Tensor:{c}");
control_ops.Add(t1.op);
- break;
- }
+ break;
+ }
}
return new _ControlDependenciesController(this, control_ops);
}
@@ -138,9 +138,9 @@ namespace Tensorflow
_control_dependencies_stack.RemoveAt(_control_dependencies_stack.Count-1);
}
- ///
- /// Record that the given op depends on all registered control dependencies.
- ///
+ ///
+ /// Record that the given op depends on all registered control dependencies.
+ ///
public void _record_op_seen_by_control_dependencies(Operation op)
{
foreach (var controller in _control_dependencies_stack)
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
index 75f46a59..a826d2f6 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
@@ -1,17 +1,17 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using System;
@@ -38,8 +38,8 @@ namespace Tensorflow
public OperationDescription NewOperation(string opType, string opName)
{
return c_api.TF_NewOperation(_handle, opType, opName);
- }
-
+ }
+
public Operation[] ReturnOperations(IntPtr results)
{
TF_Operation return_oper_handle = new TF_Operation();
@@ -89,14 +89,14 @@ namespace Tensorflow
public ITensorOrOperation[] get_operations()
{
return _nodes_by_name.Values.ToArray();
- }
-
+ }
+
///
/// Returns the `Operation` with the given `name`.
///
- /// This method may be called concurrently from multiple threads.
- ///
- /// The name of the `Operation` to return.
+ /// This method may be called concurrently from multiple threads.
+ ///
+ /// The name of the `Operation` to return.
public Operation get_operation_by_name(string name)
=> as_graph_element(name, allow_tensor: false, allow_operation: true) as Operation;
@@ -109,8 +109,8 @@ namespace Tensorflow
{
var op_name = Marshal.PtrToStringAnsi(c_api.TF_OperationName(tf_oper));
return _get_operation_by_name_unsafe(op_name);
- }
-
+ }
+
///
/// Creates an `Operation` in this graph from the supplied TF_Operation.
///
@@ -125,7 +125,7 @@ namespace Tensorflow
///
/// a wrapped TF_Operation
/// (Optional.) If True, device functions will be executed
- /// to compute the device property of the Operation.
+ /// to compute the device property of the Operation.
/// An `Operation` object.
public Operation _create_op_from_tf_operation(IntPtr c_op, bool compute_device = true)
{
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs
index 48420d18..e53aa02e 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.cs
@@ -1,21 +1,21 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using System;
-using System.Collections;
+using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
@@ -75,7 +75,7 @@ namespace Tensorflow
/// then create a TensorFlow session to run parts of the graph across a set of local and remote devices.
///
/// https://www.tensorflow.org/guide/graphs https://www.tensorflow.org/api_docs/python/tf/Graph
- public partial class Graph : DisposableObject
+ public partial class Graph : DisposableObject
#if !SERIALIZABLE
, IEnumerable
#endif
@@ -105,18 +105,18 @@ namespace Tensorflow
///
private Dictionary _collections = new Dictionary();
- public bool building_function;
-
- int _seed;
- public int seed
- {
- get => _seed;
- set
- {
- _seed = value;
- }
- }
-
+ public bool building_function;
+
+ int _seed;
+ public int seed
+ {
+ get => _seed;
+ set
+ {
+ _seed = value;
+ }
+ }
+
public Graph()
{
_handle = c_api.TF_NewGraph();
@@ -133,20 +133,20 @@ namespace Tensorflow
_nodes_by_name = new Dictionary();
_names_in_use = new Dictionary();
_graph_key = $"grap-key-{ops.uid()}/";
- }
-
+ }
+
public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true)
{
return _as_graph_element_locked(obj, allow_tensor, allow_operation);
}
- ///
- /// Returns a context manager that makes this `Graph` the default graph.
- ///
+ ///
+ /// Returns a context manager that makes this `Graph` the default graph.
+ ///
///
- public Graph as_default()
- {
- return ops.set_default_graph(this);
+ public Graph as_default()
+ {
+ return ops.set_default_graph(this);
}
private Tensor _as_graph_element(object obj)
@@ -155,8 +155,8 @@ namespace Tensorflow
return var._as_graph_element();
return null;
- }
-
+ }
+
private ITensorOrOperation _as_graph_element_locked(object obj, bool allow_tensor = true, bool allow_operation = true)
{
string types_str = "";
@@ -259,8 +259,8 @@ namespace Tensorflow
throw new RuntimeError("Graph is finalized and cannot be modified.");
}
- public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes,
- TF_DataType[] input_types = null, string name = null,
+ public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes,
+ TF_DataType[] input_types = null, string name = null,
Dictionary attrs = null, OpDef op_def = null)
{
if (inputs == null)
@@ -272,12 +272,12 @@ namespace Tensorflow
// If a names ends with a '/' it is a "name scope" and we use it as-is,
// after removing the trailing '/'.
name = name.EndsWith("/") ? ops.name_from_scope_name(name) : unique_name(name);
- var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs);
+ var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs);
- var input_ops = inputs.Select(x => x.op).ToArray();
+ var input_ops = inputs.Select(x => x.op).ToArray();
var control_inputs = _control_dependencies_for_inputs(input_ops);
- var op = new Operation(node_def,
+ var op = new Operation(node_def,
this,
inputs: inputs,
output_types: dtypes,
@@ -297,9 +297,9 @@ namespace Tensorflow
return op;
}
- public void device(string device_name)
- {
- throw new NotImplementedException("");
+ public void device(string device_name)
+ {
+ throw new NotImplementedException("");
}
private void _create_op_helper(Operation op, bool compute_device = true)
@@ -353,8 +353,8 @@ namespace Tensorflow
_name_stack = new_stack;
return String.IsNullOrEmpty(new_stack) ? "" : new_stack + "/";
- }
-
+ }
+
///
/// Return a unique operation name for `name`.
///
@@ -379,10 +379,10 @@ namespace Tensorflow
/// A string to be passed to `create_op()` that will be used
/// to name the operation being created.
public string unique_name(string name, bool mark_as_used = true)
- {
- if (name.EndsWith("basic_r_n_n_cell"))
- {
-
+ {
+ if (name.EndsWith("basic_r_n_n_cell"))
+ {
+
}
if (!String.IsNullOrEmpty(_name_stack))
name = _name_stack + "/" + name;
@@ -411,7 +411,7 @@ namespace Tensorflow
// Return the new name with the original capitalization of the given name.
name = $"{name}_{i - 1}";
- }
+ }
return name;
}
@@ -424,7 +424,7 @@ namespace Tensorflow
unsafe
{
var tf_output_ptr = (TF_Output*)return_output_handle;
- for (int i = 0; i < num_return_outputs; i++)
+ for (int i = 0; i < num_return_outputs; i++)
return_outputs[i] = *(tf_output_ptr + i);
return return_outputs;
}
@@ -444,25 +444,25 @@ namespace Tensorflow
{
List t = default;
var collection = _collections.ContainsKey(name) ? _collections[name] : new List();
- switch (collection)
- {
- case List list:
- t = list.Select(x => (T)(object)x).ToList();
- break;
- case List list:
- t = list.Select(x => (T)(object)x).ToList();
- break;
- case List list:
- t = list.Select(x => (T)(object)x).ToList();
- break;
- case List list:
- t = list.Select(x => (T)(object)x).ToList();
- break;
- case List list:
- t = list.Select(x => (T)(object)x).ToList();
- break;
- default:
- throw new NotImplementedException($"get_collection<{typeof(T).FullName}>");
+ switch (collection)
+ {
+ case List list:
+ t = list.Select(x => (T)(object)x).ToList();
+ break;
+ case List list:
+ t = list.Select(x => (T)(object)x).ToList();
+ break;
+ case List list:
+ t = list.Select(x => (T)(object)x).ToList();
+ break;
+ case List list:
+ t = list.Select(x => (T)(object)x).ToList();
+ break;
+ case List list:
+ t = list.Select(x => (T)(object)x).ToList();
+ break;
+ default:
+ throw new NotImplementedException($"get_collection<{typeof(T).FullName}>");
}
return t;
}
@@ -482,22 +482,22 @@ namespace Tensorflow
public void prevent_fetching(Operation op)
{
_unfetchable_ops.Add(op);
- }
-
- protected override void DisposeManagedResources()
- {
- ops.default_graph_stack.remove(this);
- }
-
- protected override void DisposeUnmanagedResources(IntPtr handle)
- {
- c_api.TF_DeleteGraph(handle);
}
- public Tensor get_tensor_by_tf_output(TF_Output tf_output)
- {
- var op = _get_operation_by_tf_operation(tf_output.oper);
- return op.outputs[tf_output.index];
+ protected override void DisposeManagedResources()
+ {
+ ops.default_graph_stack.remove(this);
+ }
+
+ protected override void DisposeUnmanagedResources(IntPtr handle)
+ {
+ c_api.TF_DeleteGraph(handle);
+ }
+
+ public Tensor get_tensor_by_tf_output(TF_Output tf_output)
+ {
+ var op = _get_operation_by_tf_operation(tf_output.oper);
+ return op.outputs[tf_output.index];
}
///
@@ -510,48 +510,48 @@ namespace Tensorflow
public Tensor get_tensor_by_name(string name)
{
return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false);
- }
-
- public TensorShape GetTensorShape(TF_Output output)
- {
- var status = new Status();
- var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status);
- status.Check();
-
- if (ndim == -1)
- return new TensorShape();
-
- var dims = new long[ndim];
- c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status);
- status.Check();
-
- return new TensorShape(dims.Select(x => (int)x).ToArray());
- }
-
- string debugString = string.Empty;
- public override string ToString()
- {
- return $"{graph_key}, ({_handle})";
- /*if (string.IsNullOrEmpty(debugString))
- {
- int len = 0;
- debugString = c_api.TF_GraphDebugString(_handle, out len);
- }
-
- return debugString;*/
- }
-
+ }
+
+ public TensorShape GetTensorShape(TF_Output output)
+ {
+ var status = new Status();
+ var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status);
+ status.Check();
+
+ if (ndim == -1)
+ return new TensorShape();
+
+ var dims = new long[ndim];
+ c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status);
+ status.Check();
+
+ return new TensorShape(dims.Select(x => (int)x).ToArray());
+ }
+
+ string debugString = string.Empty;
+ public override string ToString()
+ {
+ return $"{graph_key}, ({_handle})";
+ /*if (string.IsNullOrEmpty(debugString))
+ {
+ int len = 0;
+ debugString = c_api.TF_GraphDebugString(_handle, out len);
+ }
+
+ return debugString;*/
+ }
+
#if !SERIALIZABLE
- private IEnumerable GetEnumerable()
+ private IEnumerable GetEnumerable()
=> c_api_util.tf_operations(this);
- IEnumerator IEnumerable.GetEnumerator()
- => GetEnumerable().GetEnumerator();
-
- IEnumerator IEnumerable.GetEnumerator()
- => throw new NotImplementedException();
+ IEnumerator IEnumerable.GetEnumerator()
+ => GetEnumerable().GetEnumerator();
+
+ IEnumerator IEnumerable.GetEnumerator()
+ => throw new NotImplementedException();
#endif
-
+
public static implicit operator IntPtr(Graph graph)
{
return graph._handle;
diff --git a/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs b/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs
index 63285bae..3472db29 100644
--- a/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs
+++ b/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs
@@ -1,17 +1,17 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using System.Collections.Generic;
@@ -32,8 +32,8 @@ namespace Tensorflow
private bool _new_stack;
private ControlFlowContext _old_control_flow_context;
- public ITensorOrOperation[] control_inputs => _control_inputs_val.ToArray();
-
+ public ITensorOrOperation[] control_inputs => _control_inputs_val.ToArray();
+
///
/// Create a new `_ControlDependenciesController`.
///
@@ -69,7 +69,7 @@ namespace Tensorflow
_new_stack = false;
}
- _seen_nodes = new List();
+ _seen_nodes = new List();
_old_stack = null;
_old_control_flow_context = null;
}
@@ -113,16 +113,16 @@ namespace Tensorflow
public void Dispose()
{
- }
-
- public void __init__()
- {
-
- }
-
- public void __del__()
- {
-
- }
+ }
+
+ public void __init__()
+ {
+
+ }
+
+ public void __del__()
+ {
+
+ }
}
}
diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs
index 9351cab4..d04eefe2 100644
--- a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs
+++ b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs
@@ -1,324 +1,324 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Linq;
-using System.Collections.Generic;
-using util = Tensorflow.control_flow_util;
-using static Tensorflow.Binding;
-
-namespace Tensorflow.Operations.ControlFlows
-{
- ///
- /// Maintain the mapping from the loops to their grad states.
- ///
- public class ControlFlowState
- {
- Dictionary _map;
- //class ControlFlowState(object):
- // """Maintain the mapping from the loops to their grad states."""
-
- // def __init__(self):
- // self._map = {} # maps forward loop context to GradLoopState
-
- // def GetGradState(self, op, before):
- // """Return the grad state for this op if it's in a forward loop context."""
- // if before and util.IsLoopExit(op):
- // forward_ctxt = op._get_control_flow_context()
- // forward_ctxt = forward_ctxt.outer_context
- // if forward_ctxt:
- // forward_ctxt = forward_ctxt.GetWhileContext()
- // else:
- // forward_ctxt = _GetWhileContext(op)
- // if forward_ctxt:
- // return self._map.get(forward_ctxt)
- // return None
-
- public ControlFlowState()
- {
- _map = new Dictionary();
- }
-
- ///
- /// Return the grad state for this op if it's in a forward loop context.
- ///
- ///
- ///
- ///
- public GradLoopState GetGradState(Operation op, bool before)
- {
- ControlFlowContext forward_ctxt = null;
- if (before && util.IsLoopExit(op))
- {
- forward_ctxt = op._get_control_flow_context();
- forward_ctxt = forward_ctxt.outer_context;
- if (forward_ctxt != null)
- forward_ctxt = forward_ctxt.GetWhileContext();
- }
- else
- forward_ctxt = util.GetWhileContext(op);
- if (forward_ctxt != null)
- return _map.get(forward_ctxt);
- return null;
- }
-
- public Tensor[] ProcessUnusedLoopExits(Dictionary pending_count, List to_ops_set)
- {
- var loop_exits = new List();
- foreach(var grad_state in _map.Values)
- {
- foreach(var y in grad_state.forward_loop_exits)
- {
- if(!pending_count.ContainsKey(y.op.name))
- {
- grad_state.pending_exits_count -= 1;
- if (!to_ops_set.Contains(y.op))
- grad_state.unused_exits.append(y);
- if (grad_state.pending_exits_count == 0)
- loop_exits.extend(grad_state.unused_exits);
- }
- }
-
- foreach(var y in grad_state.forward_context.loop_enters)
- {
- if (!pending_count.ContainsKey(y.op.name))
- pending_count[y.op.name] = 1;
- }
- }
-
- return loop_exits.ToArray();
- }
-
- public void EnterGradWhileContext(Operation op, bool before)
- {
- var grad_state = GetGradState(op, before);
- if (grad_state != null)
- grad_state.grad_context.Enter();
- }
-
- public void ExitGradWhileContext(Operation op, bool before)
- {
- var grad_state = GetGradState(op, before);
- if (grad_state != null)
- grad_state.grad_context.Exit();
- }
-
- // def AddWhileContext(self, op, between_op_list, between_ops):
- // """Add the grad state for the while loop that op belongs to.
-
- // Note that op is an Exit, and this method must be called in
- // the control flow context where gradients() is called.
-
- // Note that this method modifies `between_op_list` and `between_ops`.
- // """
- // forward_ctxt = _GetWhileContext(op)
- // grad_state = self._map.get(forward_ctxt)
- // if grad_state is None:
- // # This is a new while loop so create a grad state for it.
- // outer_forward_ctxt = forward_ctxt.outer_context
- // if outer_forward_ctxt:
- // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
- // outer_grad_state = None
- // if outer_forward_ctxt:
- // outer_grad_state = self._map.get(outer_forward_ctxt)
- // grad_state = GradLoopState(forward_ctxt, outer_grad_state)
- // self._map[forward_ctxt] = grad_state
-
- // # We need to include all exits of a loop for backprop.
- // for loop_exit in grad_state.forward_loop_exits:
- // if loop_exit.op not in between_ops:
- // between_ops.add(loop_exit.op)
- // between_op_list.append(loop_exit.op)
- public void AddWhileContext(Operation op, List between_op_list, List between_ops)
- {
- var forward_ctxt = op.GetWhileContext();
- var grad_state = _map.ContainsKey(forward_ctxt) ? _map[forward_ctxt] : null;
- if(grad_state == null)
- {
- GradLoopState outer_grad_state = null;
- var outer_forward_ctxt = forward_ctxt.outer_context;
- if (outer_forward_ctxt != null)
- outer_forward_ctxt = outer_forward_ctxt.GetWhileContext();
- if (outer_forward_ctxt != null)
- outer_grad_state = _map[outer_forward_ctxt];
- grad_state = new GradLoopState(forward_ctxt, outer_grad_state);
- _map[forward_ctxt] = grad_state;
-
- // We need to include all exits of a loop for backprop.
- foreach (var loop_exit in grad_state.forward_loop_exits)
- {
- if(!between_ops.Contains(loop_exit.op))
- {
- between_ops.add(loop_exit.op);
- between_op_list.append(loop_exit.op);
- }
- }
- }
- }
-
- // def ZerosLikeForExit(self, val):
- // """Create zeros_like gradient for a loop exit.
-
- // If the result of a loop variable is not used but is involved in
- // computing the result of some needed loop variable, we create a
- // zero-valued tensor that is fed as gradient for the Exit node of that
- // loop variable. Note that val.op is an Exit, and this method must be
- // called in the control flow context where gradients() is called.
-
- // Args:
- // val: The output tensor of an Exit op.
-
- // Returns:
- // A zero tensor of the same shape of val.
- // """
- // val_shape = val.get_shape()
- // forward_ctxt = val.op._get_control_flow_context()
- // outer_forward_ctxt = forward_ctxt.outer_context
- // if outer_forward_ctxt:
- // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
- // outer_grad_state = None
- // if outer_forward_ctxt:
- // outer_grad_state = self._map.get(outer_forward_ctxt)
- // if outer_grad_state:
- // # This is a nested loop.
- // if val_shape.is_fully_defined():
- // # If the shape is known statically, just create a zero tensor
- // # with the right shape in the right context.
- // outer_grad_state.grad_context.Enter()
- // result = array_ops.zeros(val_shape.dims, val.dtype)
- // outer_grad_state.grad_context.Exit()
- // else:
- // # Only the shape of value is needed for backprop.
- // forward_ctxt.outer_context.Enter()
- // shape = array_ops.shape_internal(val, optimize=False)
- // forward_ctxt.outer_context.Exit()
- // # Save the shape to a stack.
- // history_shape = outer_grad_state.AddForwardAccumulator(shape)
- // # Get the shape back from the stack.
- // outer_grad_ctxt = outer_grad_state.grad_context
- // outer_grad_ctxt.Enter()
- // real_shape = outer_grad_state.AddBackpropAccumulatedValue(
- // history_shape, shape)
- // result = array_ops.zeros(real_shape, val.dtype)
- // outer_grad_ctxt.Exit()
- // else:
- // # This is not a nested loop.
- // if val_shape.is_fully_defined():
- // # If the shape is known statically, just create a zero tensor
- // # with the right shape.
- // result = array_ops.zeros(val_shape.dims, val.dtype)
- // else:
- // result = array_ops.zeros_like(val, optimize=False)
- // return result
-
- public Tensor ZerosLike(Operation op, int index)
- {
- if (util.IsLoopSwitch(op))
- return null;
- if (op.graph.building_function)
- return array_ops.zeros_like(op.outputs[index]);
- var dead_branch = util.IsSwitch(op);
- var forward_ctxt = util.GetWhileContext(op);
- var grad_state = _map.get(forward_ctxt);
- // op is not in a while loop that is part of gradients().
- if (grad_state == null)
- return ZerosLikeOutsideLoop(op, index);
- throw new NotImplementedException("ZerosLike");
- }
-
- public Tensor ZerosLikeOutsideLoop(Operation op, int index)
- {
- var val = op.outputs[index];
- if (!util.IsSwitch(op))
- {
- if (val.dtype == dtypes.resource)
- throw new NotImplementedException("ZerosLikeOutsideLoop");
- /*return array_ops.zeros(
- gen_resource_variable_ops.variable_shape(val),
- dtype: default_gradient.get_zeros_dtype(val));*/
- return array_ops.zeros_like(val, optimize: false);
- }
- else
- throw new NotImplementedException("ZerosLikeOutsideLoop");
- }
-
- ///
- /// Create zeros_like gradient for a loop exit.
- ///
- ///
- ///
- public Tensor ZerosLikeForExit(Tensor val)
- {
- Tensor result = null;
- var val_shape = val.TensorShape;
- var forward_ctxt = val.op._get_control_flow_context();
- var outer_forward_ctxt = forward_ctxt.outer_context;
- if (outer_forward_ctxt != null)
- outer_forward_ctxt = outer_forward_ctxt.GetWhileContext();
- GradLoopState outer_grad_state = null;
- if (outer_forward_ctxt != null)
- outer_grad_state = _map.get(outer_forward_ctxt);
- // This is a nested loop.
- if (outer_grad_state != null)
- {
- throw new NotImplementedException("ZerosLikeForExit");
- }
- else
- {
- // If the shape is known statically, just create a zero tensor
- // with the right shape.
- if (val_shape.is_fully_defined())
- result = array_ops.zeros(val_shape.dims, val.dtype);
- else
- result = array_ops.zeros_like(val, optimize: false);
- }
- return result;
- }
-
- public void PostProcessing()
- {
- foreach(var grad_state in _map.Values)
- {
- foreach(var b_merge in grad_state.switch_map.Values)
- {
- if(b_merge.op.inputs[0] == b_merge.op.inputs[1])
- {
- Tensor next_grad_val = null;
- // The value of this loop variable at iteration i+1 doesn't
- // depend on its value at iteration i. So use zeros as the
- // gradients for all iterations > 0.
- var dtype = b_merge.op.inputs[0].dtype;
- var shape = b_merge.op.inputs[0].TensorShape;
- if (shape.is_fully_defined())
- {
- grad_state.grad_context.Enter();
- // Create a zeros and use it for iterations > 0.
- var grad_val = constant_op.constant(0, dtype: dtype, shape: shape);
- next_grad_val = control_flow_ops._NextIteration(grad_val);
- grad_state.grad_context.Exit();
- }
- else
- {
- throw new NotImplementedException("PostProcessing shape is not fully defined.");
- }
-
- b_merge.op._update_input(1, next_grad_val);
- }
- }
- }
- }
- }
-}
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Linq;
+using System.Collections.Generic;
+using util = Tensorflow.control_flow_util;
+using static Tensorflow.Binding;
+
+namespace Tensorflow.Operations.ControlFlows
+{
+ ///
+ /// Maintain the mapping from the loops to their grad states.
+ ///
+ public class ControlFlowState
+ {
+ Dictionary _map;
+ //class ControlFlowState(object):
+ // """Maintain the mapping from the loops to their grad states."""
+
+ // def __init__(self):
+ // self._map = {} # maps forward loop context to GradLoopState
+
+ // def GetGradState(self, op, before):
+ // """Return the grad state for this op if it's in a forward loop context."""
+ // if before and util.IsLoopExit(op):
+ // forward_ctxt = op._get_control_flow_context()
+ // forward_ctxt = forward_ctxt.outer_context
+ // if forward_ctxt:
+ // forward_ctxt = forward_ctxt.GetWhileContext()
+ // else:
+ // forward_ctxt = _GetWhileContext(op)
+ // if forward_ctxt:
+ // return self._map.get(forward_ctxt)
+ // return None
+
+ public ControlFlowState()
+ {
+ _map = new Dictionary();
+ }
+
+ ///
+ /// Return the grad state for this op if it's in a forward loop context.
+ ///
+ ///
+ ///
+ ///
+ public GradLoopState GetGradState(Operation op, bool before)
+ {
+ ControlFlowContext forward_ctxt = null;
+ if (before && util.IsLoopExit(op))
+ {
+ forward_ctxt = op._get_control_flow_context();
+ forward_ctxt = forward_ctxt.outer_context;
+ if (forward_ctxt != null)
+ forward_ctxt = forward_ctxt.GetWhileContext();
+ }
+ else
+ forward_ctxt = util.GetWhileContext(op);
+ if (forward_ctxt != null)
+ return _map.get(forward_ctxt);
+ return null;
+ }
+
+ public Tensor[] ProcessUnusedLoopExits(Dictionary pending_count, List to_ops_set)
+ {
+ var loop_exits = new List();
+ foreach(var grad_state in _map.Values)
+ {
+ foreach(var y in grad_state.forward_loop_exits)
+ {
+ if(!pending_count.ContainsKey(y.op.name))
+ {
+ grad_state.pending_exits_count -= 1;
+ if (!to_ops_set.Contains(y.op))
+ grad_state.unused_exits.append(y);
+ if (grad_state.pending_exits_count == 0)
+ loop_exits.extend(grad_state.unused_exits);
+ }
+ }
+
+ foreach(var y in grad_state.forward_context.loop_enters)
+ {
+ if (!pending_count.ContainsKey(y.op.name))
+ pending_count[y.op.name] = 1;
+ }
+ }
+
+ return loop_exits.ToArray();
+ }
+
+ public void EnterGradWhileContext(Operation op, bool before)
+ {
+ var grad_state = GetGradState(op, before);
+ if (grad_state != null)
+ grad_state.grad_context.Enter();
+ }
+
+ public void ExitGradWhileContext(Operation op, bool before)
+ {
+ var grad_state = GetGradState(op, before);
+ if (grad_state != null)
+ grad_state.grad_context.Exit();
+ }
+
+ // def AddWhileContext(self, op, between_op_list, between_ops):
+ // """Add the grad state for the while loop that op belongs to.
+
+ // Note that op is an Exit, and this method must be called in
+ // the control flow context where gradients() is called.
+
+ // Note that this method modifies `between_op_list` and `between_ops`.
+ // """
+ // forward_ctxt = _GetWhileContext(op)
+ // grad_state = self._map.get(forward_ctxt)
+ // if grad_state is None:
+ // # This is a new while loop so create a grad state for it.
+ // outer_forward_ctxt = forward_ctxt.outer_context
+ // if outer_forward_ctxt:
+ // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
+ // outer_grad_state = None
+ // if outer_forward_ctxt:
+ // outer_grad_state = self._map.get(outer_forward_ctxt)
+ // grad_state = GradLoopState(forward_ctxt, outer_grad_state)
+ // self._map[forward_ctxt] = grad_state
+
+ // # We need to include all exits of a loop for backprop.
+ // for loop_exit in grad_state.forward_loop_exits:
+ // if loop_exit.op not in between_ops:
+ // between_ops.add(loop_exit.op)
+ // between_op_list.append(loop_exit.op)
+ public void AddWhileContext(Operation op, List between_op_list, List between_ops)
+ {
+ var forward_ctxt = op.GetWhileContext();
+ var grad_state = _map.ContainsKey(forward_ctxt) ? _map[forward_ctxt] : null;
+ if(grad_state == null)
+ {
+ GradLoopState outer_grad_state = null;
+ var outer_forward_ctxt = forward_ctxt.outer_context;
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt = outer_forward_ctxt.GetWhileContext();
+ if (outer_forward_ctxt != null)
+ outer_grad_state = _map[outer_forward_ctxt];
+ grad_state = new GradLoopState(forward_ctxt, outer_grad_state);
+ _map[forward_ctxt] = grad_state;
+
+ // We need to include all exits of a loop for backprop.
+ foreach (var loop_exit in grad_state.forward_loop_exits)
+ {
+ if(!between_ops.Contains(loop_exit.op))
+ {
+ between_ops.add(loop_exit.op);
+ between_op_list.append(loop_exit.op);
+ }
+ }
+ }
+ }
+
+ // def ZerosLikeForExit(self, val):
+ // """Create zeros_like gradient for a loop exit.
+
+ // If the result of a loop variable is not used but is involved in
+ // computing the result of some needed loop variable, we create a
+ // zero-valued tensor that is fed as gradient for the Exit node of that
+ // loop variable. Note that val.op is an Exit, and this method must be
+ // called in the control flow context where gradients() is called.
+
+ // Args:
+ // val: The output tensor of an Exit op.
+
+ // Returns:
+ // A zero tensor of the same shape of val.
+ // """
+ // val_shape = val.get_shape()
+ // forward_ctxt = val.op._get_control_flow_context()
+ // outer_forward_ctxt = forward_ctxt.outer_context
+ // if outer_forward_ctxt:
+ // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
+ // outer_grad_state = None
+ // if outer_forward_ctxt:
+ // outer_grad_state = self._map.get(outer_forward_ctxt)
+ // if outer_grad_state:
+ // # This is a nested loop.
+ // if val_shape.is_fully_defined():
+ // # If the shape is known statically, just create a zero tensor
+ // # with the right shape in the right context.
+ // outer_grad_state.grad_context.Enter()
+ // result = array_ops.zeros(val_shape.dims, val.dtype)
+ // outer_grad_state.grad_context.Exit()
+ // else:
+ // # Only the shape of value is needed for backprop.
+ // forward_ctxt.outer_context.Enter()
+ // shape = array_ops.shape_internal(val, optimize=False)
+ // forward_ctxt.outer_context.Exit()
+ // # Save the shape to a stack.
+ // history_shape = outer_grad_state.AddForwardAccumulator(shape)
+ // # Get the shape back from the stack.
+ // outer_grad_ctxt = outer_grad_state.grad_context
+ // outer_grad_ctxt.Enter()
+ // real_shape = outer_grad_state.AddBackpropAccumulatedValue(
+ // history_shape, shape)
+ // result = array_ops.zeros(real_shape, val.dtype)
+ // outer_grad_ctxt.Exit()
+ // else:
+ // # This is not a nested loop.
+ // if val_shape.is_fully_defined():
+ // # If the shape is known statically, just create a zero tensor
+ // # with the right shape.
+ // result = array_ops.zeros(val_shape.dims, val.dtype)
+ // else:
+ // result = array_ops.zeros_like(val, optimize=False)
+ // return result
+
+ public Tensor ZerosLike(Operation op, int index)
+ {
+ if (util.IsLoopSwitch(op))
+ return null;
+ if (op.graph.building_function)
+ return array_ops.zeros_like(op.outputs[index]);
+ var dead_branch = util.IsSwitch(op);
+ var forward_ctxt = util.GetWhileContext(op);
+ var grad_state = _map.get(forward_ctxt);
+ // op is not in a while loop that is part of gradients().
+ if (grad_state == null)
+ return ZerosLikeOutsideLoop(op, index);
+ throw new NotImplementedException("ZerosLike");
+ }
+
+ public Tensor ZerosLikeOutsideLoop(Operation op, int index)
+ {
+ var val = op.outputs[index];
+ if (!util.IsSwitch(op))
+ {
+ if (val.dtype == dtypes.resource)
+ throw new NotImplementedException("ZerosLikeOutsideLoop");
+ /*return array_ops.zeros(
+ gen_resource_variable_ops.variable_shape(val),
+ dtype: default_gradient.get_zeros_dtype(val));*/
+ return array_ops.zeros_like(val, optimize: false);
+ }
+ else
+ throw new NotImplementedException("ZerosLikeOutsideLoop");
+ }
+
+ ///
+ /// Create zeros_like gradient for a loop exit.
+ ///
+ ///
+ ///
+ public Tensor ZerosLikeForExit(Tensor val)
+ {
+ Tensor result = null;
+ var val_shape = val.TensorShape;
+ var forward_ctxt = val.op._get_control_flow_context();
+ var outer_forward_ctxt = forward_ctxt.outer_context;
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt = outer_forward_ctxt.GetWhileContext();
+ GradLoopState outer_grad_state = null;
+ if (outer_forward_ctxt != null)
+ outer_grad_state = _map.get(outer_forward_ctxt);
+ // This is a nested loop.
+ if (outer_grad_state != null)
+ {
+ throw new NotImplementedException("ZerosLikeForExit");
+ }
+ else
+ {
+ // If the shape is known statically, just create a zero tensor
+ // with the right shape.
+ if (val_shape.is_fully_defined())
+ result = array_ops.zeros(val_shape.dims, val.dtype);
+ else
+ result = array_ops.zeros_like(val, optimize: false);
+ }
+ return result;
+ }
+
+ public void PostProcessing()
+ {
+ foreach(var grad_state in _map.Values)
+ {
+ foreach(var b_merge in grad_state.switch_map.Values)
+ {
+ if(b_merge.op.inputs[0] == b_merge.op.inputs[1])
+ {
+ Tensor next_grad_val = null;
+ // The value of this loop variable at iteration i+1 doesn't
+ // depend on its value at iteration i. So use zeros as the
+ // gradients for all iterations > 0.
+ var dtype = b_merge.op.inputs[0].dtype;
+ var shape = b_merge.op.inputs[0].TensorShape;
+ if (shape.is_fully_defined())
+ {
+ grad_state.grad_context.Enter();
+ // Create a zeros and use it for iterations > 0.
+ var grad_val = constant_op.constant(0, dtype: dtype, shape: shape);
+ next_grad_val = control_flow_ops._NextIteration(grad_val);
+ grad_state.grad_context.Exit();
+ }
+ else
+ {
+ throw new NotImplementedException("PostProcessing shape is not fully defined.");
+ }
+
+ b_merge.op._update_input(1, next_grad_val);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs
index 2552df8a..2011ca56 100644
--- a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs
+++ b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs
@@ -1,335 +1,335 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Collections;
-using System.Collections.Generic;
-using System.Linq;
-using static Tensorflow.Binding;
-using util = Tensorflow.control_flow_util;
-
-namespace Tensorflow.Operations.ControlFlows
-{
- ///
- /// The state used for constructing the gradient graph for a while loop.
- ///
- public class GradLoopState
- {
- private WhileContext _grad_context = null;
-
- public WhileContext grad_context => _grad_context;
-
- // # The loop counter added by AddBackpropLoopCounter. It is the value
- // # of the loop counter for the current iteration.
- // self._grad_index = None
-
- // # A sync op for backprop.
- // self._grad_sync = None
-
- // # Information needed by backprop.
- private Hashtable _history_map = new Hashtable();
- public Hashtable history_map => _history_map;
- Dictionary _switch_map = new Dictionary();
- public Dictionary switch_map => _switch_map;
-
- ///
- /// The while loop context for forward.
- ///
- WhileContext _forward_context;
- public WhileContext forward_context => _forward_context;
-
- ///
- /// The grad loop state for the outer while loop.
- ///
- GradLoopState _outer_grad_state;
- public GradLoopState outer_grad_state => _outer_grad_state;
-
- Tensor _forward_index;
- public Tensor forward_index => _forward_index;
- Tensor _grad_index;
-
- Tensor[] _forward_loop_exits;
- ///
- /// The list of exits of the forward loop.
- ///
- public Tensor[] forward_loop_exits => _forward_loop_exits;
-
- List _deferred_exits;
- public List deferred_exits => _deferred_exits;
-
- List _unused_exits;
- public List unused_exits => _unused_exits;
-
- ///
- /// The number of exits we expect to see but haven't.
- ///
- public int pending_exits_count { get; set; }
-
- Operation _grad_sync;
- public Operation grad_sync
- {
- get
- {
- if(_grad_sync == null)
- {
- tf_with(ops.control_dependencies(null), delegate
- {
- _grad_sync = gen_control_flow_ops.control_trigger(name: "b_sync");
- });
- _grad_sync._set_control_flow_context(_grad_context);
- _grad_index.op._add_control_input(_grad_sync);
- if (_grad_context.outer_context != null)
- _grad_context.outer_context.AddInnerOp(_grad_sync);
- }
- return _grad_sync;
- }
- }
-
- public GradLoopState(WhileContext forward_ctxt, GradLoopState outer_grad_state_)
- {
- // Information needed by backprop.
- _unused_exits = new List();
- _deferred_exits = new List();
- _forward_loop_exits = list(forward_ctxt.loop_exits);
- pending_exits_count = len(forward_ctxt.loop_exits);
-
- _outer_grad_state = outer_grad_state_;
-
- ControlFlowContext outer_forward_ctxt = null;
- if (outer_grad_state_ != null)
- outer_forward_ctxt = outer_grad_state_.forward_context;
-
- // Add the forward loop counter.
- // with forward_ctxt._graph.as_default():
- Tensor cnt, forward_index;
- {
- if (outer_forward_ctxt != null)
- outer_forward_ctxt.Enter();
- (cnt, forward_index) = forward_ctxt.AddForwardLoopCounter(outer_grad_state);
- if (outer_forward_ctxt != null)
- outer_forward_ctxt.Exit();
- }
- _forward_context = forward_ctxt;
- _forward_index = forward_index;
-
- // Add the backprop WhileContext, and the backprop loop counter.
- if (outer_grad_state != null)
- {
- // This is a nested loop. Remember the iteration counts for each
- // execution of this inner loop.
- throw new NotImplementedException("GradLoopState");
- }
- else
- {
- if (outer_forward_ctxt != null)
- outer_forward_ctxt.Enter();
- _grad_context = new WhileContext(
- maximum_iterations: forward_ctxt.maximum_iterations,
- parallel_iterations: forward_ctxt.parallel_iterations,
- back_prop: forward_ctxt.back_prop,
- swap_memory: forward_ctxt.swap_memory,
- name: forward_ctxt.name,
- grad_state: this);
- _grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state);
- if (outer_forward_ctxt != null)
- outer_forward_ctxt.Exit();
- }
- }
-
- ///
- /// Add an accumulator for each forward tensor that is needed in backprop.
- ///
- /// This is added to the forward loop at the first time when a tensor
- /// in the forward loop is used by backprop gradient computation loop.
- /// We create an accumulator that accumulates the value of tensor at each
- /// iteration. Called in the control flow context where gradients() is called.
- ///
- /// The pseudocode is:
- /// ```
- /// acc = stack();
- /// while (_pivot) {
- /// acc = stack_push(acc, value);
- /// }
- /// ```
- ///
- /// We make sure that the stack push op in one iteration is executed before
- /// next iteration. This is achieved by adding a control edge from
- /// `forward_index.op.inputs[0].op` to the push op, and another control
- /// edge from the push op to either `forward_index.op` or `forward_sync`.
- ///
- /// The source tensor in forward that is to be accumulated.
- /// True iff the tensor is on a dead branch of a cond.
- /// The stack that contains the accumulated history of the tensor.
- public Tensor AddForwardAccumulator(Tensor value, bool dead_branch = false)
- {
- _forward_index.graph.as_default();
- {
- var curr_ctxt = ops.get_default_graph()._get_control_flow_context();
- return tf_with(ops.control_dependencies(null), delegate
- {
- Tensor acc = null;
- Tensor push = null;
- if (curr_ctxt != null)
- curr_ctxt.Enter();
- ops.colocate_with(value);
- {
- // We only need to pass maximum_iterations to the stack if
- // we're inside an XLA context.
- var max_size = constant_op.constant(-1, dtypes.int32);
- acc = gen_data_flow_ops.stack_v2(
- max_size: max_size, elem_type: value.dtype.as_base_dtype(), name: "f_acc");
- }
- if (curr_ctxt != null)
- curr_ctxt.Exit();
-
- // Make acc available in the forward context.
- var enter_acc = forward_context.AddValue(acc);
-
- // Add the stack_push op in the context of value.op.
- var swap_enabled = forward_context.swap_memory;
- var value_ctxt = util.GetOutputContext(value.op);
- if(value_ctxt == forward_context)
- {
- // value is not nested in the forward context.
- forward_context.Enter();
- push = gen_data_flow_ops.stack_push_v2(enter_acc, value, swap_memory: swap_enabled);
- forward_context.Exit();
- // Protect stack push and order it before forward_index.
- forward_index.op._add_control_input(push.op);
- }
- else
- {
- throw new NotImplementedException("AddForwardAccumulator");
- }
-
- // Order stack push after the successor of forward_index
- var add_op = forward_index.op.inputs[0].op;
- push.op._add_control_input(add_op);
- return acc;
- });
- }
- }
-
- // """Add the getter for an accumulated value in the grad context.
- //
- // This is added to the backprop loop. Called in the grad context to
- // get the value of an accumulated value. The stack pop op must be guarded
- // by the pred of the controlling cond.
- //
- // Args:
- // history_value: The history (a stack) of a value.
- // value: The value that is pushed onto the stack.
- // dead_branch: True iff the tensor is on a dead branch of a cond.
- //
- // Returns:
- // The current value (the top of the stack).
- // """
-
- public Tensor AddBackpropAccumulatedValue(Tensor history_value, Tensor value, bool dead_branch= false)
- {
- var history_ctxt = history_value.op._get_control_flow_context();
- // Find the cond context that controls history_value if any.
- CondContext cond_ctxt = null;
- Tensor pop = null;
- var value_ctxt = value.op._get_control_flow_context();
- while(value_ctxt != null && value_ctxt != history_ctxt)
- {
- if (value_ctxt is CondContext cc)
- cond_ctxt = cc;
- value_ctxt = value_ctxt.outer_context;
- }
- tf_with(ops.control_dependencies(null), delegate
- {
- grad_context.Enter();
- if(cond_ctxt != null)
- {
- throw new NotImplementedException("AddBackpropAccumulatedValue");
- }
- pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype());
- pop.set_shape(value.TensorShape);
- grad_context.Exit();
- });
- var parallel_iterations = grad_context.parallel_iterations;
- if (parallel_iterations > 1)
- // All pops are ordered after pivot_for_body and before grad_sync.
- grad_sync._add_control_input(pop.op);
- return pop;
- }
-
- ///
- /// Get the real value of `value`.
- ///
- /// A tensor to be captured.
- /// The same tensor obtained from the saved history.
- public Tensor GetRealValue(Tensor value)
- {
- Tensor real_value = null;
- if(real_value == null)
- {
- var cur_value = value;
- var cur_grad_state = this;
- Tensor history_value = null;
- while (true)
- {
- var enter_op = util.GetLoopConstantEnter(cur_value);
- if(enter_op != null)
- {
- // Special case: cur_value comes from a constant Enter node.
- cur_value = enter_op.inputs[0];
- cur_grad_state = cur_grad_state.outer_grad_state;
- if(cur_grad_state == null)
- {
- // We are now outside all nested loops for this gradient(),
- // so `value` is a loop invariant and there is no need to
- // save the history of value. Just make cur_value to enter
- // the right control flow context.
- real_value = _grad_context.AddValue(cur_value);
- break;
- }
- }
- else if (constant_op.is_constant(cur_value))
- {
- // We are now outside all nested loops for this gradient(),
- // so `value` is a loop invariant and there is no need to
- // save the history of value. Just make cur_value to enter
- // the right control flow context.
- real_value = constant_op.constant(
- tensor_util.constant_value(cur_value), dtype: cur_value.dtype);
- break;
- }
- else
- {
- // Record the history of this value in forward_ctxt.
- _grad_context.Exit();
- history_value = cur_grad_state.AddForwardAccumulator(cur_value);
- _grad_context.Enter();
- break;
- }
- }
-
- if(real_value == null)
- {
- // Add the stack pop op in the grad context.
- real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value);
- if (cur_grad_state != this)
- real_value = _grad_context.AddValue(real_value);
- }
- _history_map[value.name] = real_value;
- }
- return real_value;
- }
- }
-}
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Linq;
+using static Tensorflow.Binding;
+using util = Tensorflow.control_flow_util;
+
+namespace Tensorflow.Operations.ControlFlows
+{
+ ///
+ /// The state used for constructing the gradient graph for a while loop.
+ ///
+ public class GradLoopState
+ {
+ private WhileContext _grad_context = null;
+
+ public WhileContext grad_context => _grad_context;
+
+ // # The loop counter added by AddBackpropLoopCounter. It is the value
+ // # of the loop counter for the current iteration.
+ // self._grad_index = None
+
+ // # A sync op for backprop.
+ // self._grad_sync = None
+
+ // # Information needed by backprop.
+ private Hashtable _history_map = new Hashtable();
+ public Hashtable history_map => _history_map;
+ Dictionary _switch_map = new Dictionary();
+ public Dictionary switch_map => _switch_map;
+
+ ///
+ /// The while loop context for forward.
+ ///
+ WhileContext _forward_context;
+ public WhileContext forward_context => _forward_context;
+
+ ///
+ /// The grad loop state for the outer while loop.
+ ///
+ GradLoopState _outer_grad_state;
+ public GradLoopState outer_grad_state => _outer_grad_state;
+
+ Tensor _forward_index;
+ public Tensor forward_index => _forward_index;
+ Tensor _grad_index;
+
+ Tensor[] _forward_loop_exits;
+ ///
+ /// The list of exits of the forward loop.
+ ///
+ public Tensor[] forward_loop_exits => _forward_loop_exits;
+
+ List _deferred_exits;
+ public List deferred_exits => _deferred_exits;
+
+ List _unused_exits;
+ public List unused_exits => _unused_exits;
+
+ ///
+ /// The number of exits we expect to see but haven't.
+ ///
+ public int pending_exits_count { get; set; }
+
+ Operation _grad_sync;
+ public Operation grad_sync
+ {
+ get
+ {
+ if(_grad_sync == null)
+ {
+ tf_with(ops.control_dependencies(null), delegate
+ {
+ _grad_sync = gen_control_flow_ops.control_trigger(name: "b_sync");
+ });
+ _grad_sync._set_control_flow_context(_grad_context);
+ _grad_index.op._add_control_input(_grad_sync);
+ if (_grad_context.outer_context != null)
+ _grad_context.outer_context.AddInnerOp(_grad_sync);
+ }
+ return _grad_sync;
+ }
+ }
+
+ public GradLoopState(WhileContext forward_ctxt, GradLoopState outer_grad_state_)
+ {
+ // Information needed by backprop.
+ _unused_exits = new List();
+ _deferred_exits = new List();
+ _forward_loop_exits = list(forward_ctxt.loop_exits);
+ pending_exits_count = len(forward_ctxt.loop_exits);
+
+ _outer_grad_state = outer_grad_state_;
+
+ ControlFlowContext outer_forward_ctxt = null;
+ if (outer_grad_state_ != null)
+ outer_forward_ctxt = outer_grad_state_.forward_context;
+
+ // Add the forward loop counter.
+ // with forward_ctxt._graph.as_default():
+ Tensor cnt, forward_index;
+ {
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt.Enter();
+ (cnt, forward_index) = forward_ctxt.AddForwardLoopCounter(outer_grad_state);
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt.Exit();
+ }
+ _forward_context = forward_ctxt;
+ _forward_index = forward_index;
+
+ // Add the backprop WhileContext, and the backprop loop counter.
+ if (outer_grad_state != null)
+ {
+ // This is a nested loop. Remember the iteration counts for each
+ // execution of this inner loop.
+ throw new NotImplementedException("GradLoopState");
+ }
+ else
+ {
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt.Enter();
+ _grad_context = new WhileContext(
+ maximum_iterations: forward_ctxt.maximum_iterations,
+ parallel_iterations: forward_ctxt.parallel_iterations,
+ back_prop: forward_ctxt.back_prop,
+ swap_memory: forward_ctxt.swap_memory,
+ name: forward_ctxt.name,
+ grad_state: this);
+ _grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state);
+ if (outer_forward_ctxt != null)
+ outer_forward_ctxt.Exit();
+ }
+ }
+
+ ///
+ /// Add an accumulator for each forward tensor that is needed in backprop.
+ ///
+ /// This is added to the forward loop at the first time when a tensor
+ /// in the forward loop is used by backprop gradient computation loop.
+ /// We create an accumulator that accumulates the value of tensor at each
+ /// iteration. Called in the control flow context where gradients() is called.
+ ///
+ /// The pseudocode is:
+ /// ```
+ /// acc = stack();
+ /// while (_pivot) {
+ /// acc = stack_push(acc, value);
+ /// }
+ /// ```
+ ///
+ /// We make sure that the stack push op in one iteration is executed before
+ /// next iteration. This is achieved by adding a control edge from
+ /// `forward_index.op.inputs[0].op` to the push op, and another control
+ /// edge from the push op to either `forward_index.op` or `forward_sync`.
+ ///
+ /// The source tensor in forward that is to be accumulated.
+ /// True iff the tensor is on a dead branch of a cond.
+ /// The stack that contains the accumulated history of the tensor.
+ public Tensor AddForwardAccumulator(Tensor value, bool dead_branch = false)
+ {
+ _forward_index.graph.as_default();
+ {
+ var curr_ctxt = ops.get_default_graph()._get_control_flow_context();
+ return tf_with(ops.control_dependencies(null), delegate
+ {
+ Tensor acc = null;
+ Tensor push = null;
+ if (curr_ctxt != null)
+ curr_ctxt.Enter();
+ ops.colocate_with(value);
+ {
+ // We only need to pass maximum_iterations to the stack if
+ // we're inside an XLA context.
+ var max_size = constant_op.constant(-1, dtypes.int32);
+ acc = gen_data_flow_ops.stack_v2(
+ max_size: max_size, elem_type: value.dtype.as_base_dtype(), name: "f_acc");
+ }
+ if (curr_ctxt != null)
+ curr_ctxt.Exit();
+
+ // Make acc available in the forward context.
+ var enter_acc = forward_context.AddValue(acc);
+
+ // Add the stack_push op in the context of value.op.
+ var swap_enabled = forward_context.swap_memory;
+ var value_ctxt = util.GetOutputContext(value.op);
+ if(value_ctxt == forward_context)
+ {
+ // value is not nested in the forward context.
+ forward_context.Enter();
+ push = gen_data_flow_ops.stack_push_v2(enter_acc, value, swap_memory: swap_enabled);
+ forward_context.Exit();
+ // Protect stack push and order it before forward_index.
+ forward_index.op._add_control_input(push.op);
+ }
+ else
+ {
+ throw new NotImplementedException("AddForwardAccumulator");
+ }
+
+ // Order stack push after the successor of forward_index
+ var add_op = forward_index.op.inputs[0].op;
+ push.op._add_control_input(add_op);
+ return acc;
+ });
+ }
+ }
+
+ // """Add the getter for an accumulated value in the grad context.
+ //
+ // This is added to the backprop loop. Called in the grad context to
+ // get the value of an accumulated value. The stack pop op must be guarded
+ // by the pred of the controlling cond.
+ //
+ // Args:
+ // history_value: The history (a stack) of a value.
+ // value: The value that is pushed onto the stack.
+ // dead_branch: True iff the tensor is on a dead branch of a cond.
+ //
+ // Returns:
+ // The current value (the top of the stack).
+ // """
+
+ public Tensor AddBackpropAccumulatedValue(Tensor history_value, Tensor value, bool dead_branch= false)
+ {
+ var history_ctxt = history_value.op._get_control_flow_context();
+ // Find the cond context that controls history_value if any.
+ CondContext cond_ctxt = null;
+ Tensor pop = null;
+ var value_ctxt = value.op._get_control_flow_context();
+ while(value_ctxt != null && value_ctxt != history_ctxt)
+ {
+ if (value_ctxt is CondContext cc)
+ cond_ctxt = cc;
+ value_ctxt = value_ctxt.outer_context;
+ }
+ tf_with(ops.control_dependencies(null), delegate
+ {
+ grad_context.Enter();
+ if(cond_ctxt != null)
+ {
+ throw new NotImplementedException("AddBackpropAccumulatedValue");
+ }
+ pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype());
+ pop.set_shape(value.TensorShape);
+ grad_context.Exit();
+ });
+ var parallel_iterations = grad_context.parallel_iterations;
+ if (parallel_iterations > 1)
+ // All pops are ordered after pivot_for_body and before grad_sync.
+ grad_sync._add_control_input(pop.op);
+ return pop;
+ }
+
+ ///
+ /// Get the real value of `value`.
+ ///
+ /// A tensor to be captured.
+ /// The same tensor obtained from the saved history.
+ public Tensor GetRealValue(Tensor value)
+ {
+ Tensor real_value = null;
+ if(real_value == null)
+ {
+ var cur_value = value;
+ var cur_grad_state = this;
+ Tensor history_value = null;
+ while (true)
+ {
+ var enter_op = util.GetLoopConstantEnter(cur_value);
+ if(enter_op != null)
+ {
+ // Special case: cur_value comes from a constant Enter node.
+ cur_value = enter_op.inputs[0];
+ cur_grad_state = cur_grad_state.outer_grad_state;
+ if(cur_grad_state == null)
+ {
+ // We are now outside all nested loops for this gradient(),
+ // so `value` is a loop invariant and there is no need to
+ // save the history of value. Just make cur_value to enter
+ // the right control flow context.
+ real_value = _grad_context.AddValue(cur_value);
+ break;
+ }
+ }
+ else if (constant_op.is_constant(cur_value))
+ {
+ // We are now outside all nested loops for this gradient(),
+ // so `value` is a loop invariant and there is no need to
+ // save the history of value. Just make cur_value to enter
+ // the right control flow context.
+ real_value = constant_op.constant(
+ tensor_util.constant_value(cur_value), dtype: cur_value.dtype);
+ break;
+ }
+ else
+ {
+ // Record the history of this value in forward_ctxt.
+ _grad_context.Exit();
+ history_value = cur_grad_state.AddForwardAccumulator(cur_value);
+ _grad_context.Enter();
+ break;
+ }
+ }
+
+ if(real_value == null)
+ {
+ // Add the stack pop op in the grad context.
+ real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value);
+ if (cur_grad_state != this)
+ real_value = _grad_context.AddValue(real_value);
+ }
+ _history_map[value.name] = real_value;
+ }
+ return real_value;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
index fbc68dbf..cee1ffd4 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
@@ -1,17 +1,17 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using static Tensorflow.Binding;
@@ -22,23 +22,23 @@ namespace Tensorflow.Operations
{
public static OpDefLibrary _op_def_lib = new OpDefLibrary();
- ///
- /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
- ///
- /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
- /// and a filter / kernel tensor of shape
- /// `[filter_height, filter_width, in_channels, out_channels]`, this op
- /// performs the following:
- ///
- /// 1. Flattens the filter to a 2-D matrix with shape
- /// `[filter_height * filter_width * in_channels, output_channels]`.
- /// 2. Extracts image patches from the input tensor to form a *virtual*
- /// tensor of shape `[batch, out_height, out_width,
- /// filter_height * filter_width * in_channels]`.
- /// 3. For each patch, right-multiplies the filter matrix and the image patch
- /// vector.
- ///
- ///
+ ///
+ /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
+ ///
+ /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+ /// and a filter / kernel tensor of shape
+ /// `[filter_height, filter_width, in_channels, out_channels]`, this op
+ /// performs the following:
+ ///
+ /// 1. Flattens the filter to a 2-D matrix with shape
+ /// `[filter_height * filter_width * in_channels, output_channels]`.
+ /// 2. Extracts image patches from the input tensor to form a *virtual*
+ /// tensor of shape `[batch, out_height, out_width,
+ /// filter_height * filter_width * in_channels]`.
+ /// 3. For each patch, right-multiplies the filter matrix and the image patch
+ /// vector.
+ ///
+ ///
///
public static Tensor conv2d(Conv2dParams parameters)
{
@@ -55,15 +55,15 @@ namespace Tensorflow.Operations
});
return _op.outputs[0];
- }
-
- ///
- /// Computes the gradients of convolution with respect to the filter.
- ///
- ///
- ///
- public static Tensor conv2d_backprop_filter(Conv2dParams parameters)
- {
+ }
+
+ ///
+ /// Computes the gradients of convolution with respect to the filter.
+ ///
+ ///
+ ///
+ public static Tensor conv2d_backprop_filter(Conv2dParams parameters)
+ {
var _op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new
{
input = parameters.Input,
@@ -77,16 +77,16 @@ namespace Tensorflow.Operations
dilations = parameters.Dilations
});
- return _op.outputs[0];
+ return _op.outputs[0];
}
- ///
- /// Computes the gradients of convolution with respect to the input.
- ///
- ///
+ ///
+ /// Computes the gradients of convolution with respect to the input.
+ ///
+ ///
///
- public static Tensor conv2d_backprop_input(Conv2dParams parameters)
- {
+ public static Tensor conv2d_backprop_input(Conv2dParams parameters)
+ {
var _op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
{
input_sizes = parameters.InputSizes,
@@ -100,7 +100,7 @@ namespace Tensorflow.Operations
dilations = parameters.Dilations
});
- return _op.outputs[0];
+ return _op.outputs[0];
}
public static Tensor bias_add(Tensor value,
@@ -135,56 +135,56 @@ namespace Tensorflow.Operations
});
return _op.outputs[0];
- }
-
- ///
- /// Computes exponential linear: exp(features) - 1 if < 0, features otherwise.
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
- /// ](http://arxiv.org/abs/1511.07289)
- ///
- public static Tensor elu(Tensor features, string name = "Elu")
- {
- var op = _op_def_lib._apply_op_helper("Elu", name: name, args: new { features });
- return op.output;
}
- ///
- /// Gradient for batch normalization.
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
+ ///
+ /// Computes exponential linear: exp(features) - 1 if < 0, features otherwise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+ /// ](http://arxiv.org/abs/1511.07289)
+ ///
+ public static Tensor elu(Tensor features, string name = "Elu")
+ {
+ var op = _op_def_lib._apply_op_helper("Elu", name: name, args: new { features });
+ return op.output;
+ }
+
+ ///
+ /// Gradient for batch normalization.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
///
- public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params)
- {
- var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
- {
- y_backprop = @params.YBackprop,
- x = @params.X,
- scale = @params.Scale,
- reserve_space_1 = @params.ReserveSpace1,
- reserve_space_2 = @params.ReserveSpace2,
- epsilon = @params.Epsilon,
- data_format = @params.DataFormat,
- is_training = @params.IsTraining
- });
- return op.outputs;
+ public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params)
+ {
+ var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
+ {
+ y_backprop = @params.YBackprop,
+ x = @params.X,
+ scale = @params.Scale,
+ reserve_space_1 = @params.ReserveSpace1,
+ reserve_space_2 = @params.ReserveSpace2,
+ epsilon = @params.Epsilon,
+ data_format = @params.DataFormat,
+ is_training = @params.IsTraining
+ });
+ return op.outputs;
}
public static Tensor[] fused_batch_norm(Tensor x,
@@ -212,19 +212,19 @@ namespace Tensorflow.Operations
return _op.outputs;
}
- ///
- /// Local Response Normalization.
- ///
- ///
- ///
- ///
- ///
- ///
- ///
+ ///
+ /// Local Response Normalization.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
///
- public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1,
- int alpha = 1, float beta = 0.5f, string name = null)
- {
+ public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1,
+ int alpha = 1, float beta = 0.5f, string name = null)
+ {
var _op = _op_def_lib._apply_op_helper("LRN", name: name, args: new
{
input,
@@ -234,7 +234,7 @@ namespace Tensorflow.Operations
beta
});
- return _op.output;
+ return _op.output;
}
public static Tensor log_softmax(Tensor logits, string name = null)
@@ -245,16 +245,16 @@ namespace Tensorflow.Operations
});
return _op.output;
- }
-
- ///
- /// Says whether the targets are in the top `K` predictions.
- ///
- ///
- ///
- ///
- ///
- /// A `Tensor` of type `bool`.
+ }
+
+ ///
+ /// Says whether the targets are in the top `K` predictions.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A `Tensor` of type `bool`.
public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null)
{
var _op = _op_def_lib._apply_op_helper("InTopKV2", name: name, args: new
@@ -265,8 +265,8 @@ namespace Tensorflow.Operations
});
return _op.output;
- }
-
+ }
+
public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
{
var _op = _op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new
@@ -297,9 +297,9 @@ namespace Tensorflow.Operations
return _op.outputs[0];
}
- public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding,
- string data_format= "NHWC", string name= null)
- {
+ public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding,
+ string data_format= "NHWC", string name= null)
+ {
var _op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new
{
orig_input,
@@ -311,7 +311,7 @@ namespace Tensorflow.Operations
data_format
});
- return _op.outputs[0];
+ return _op.outputs[0];
}
public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null)
@@ -335,8 +335,8 @@ namespace Tensorflow.Operations
});
return _op.outputs[0];
- }
-
+ }
+
public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null)
{
var _op = _op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new
@@ -377,81 +377,81 @@ namespace Tensorflow.Operations
return (_op.outputs[0], _op.outputs[1]);
}
- ///
- /// Computes softmax cross entropy cost and gradients to backpropagate.
- ///
- ///
- /// batch_size x num_classes matrix
- ///
- ///
- /// batch_size vector with values in [0, num_classes).
- /// This is the label for the given minibatch entry.
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'.
- ///
- ///
- /// Returns a tuple with multiple values, as follows:
- /// loss : Per example loss (batch_size vector).
- /// backprop : backpropagated gradients (batch_size x num_classes matrix).
- /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property.
- ///
- ///
- /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept
- /// a matrix of label probabilities, but rather a single label per row
- /// of features. This label is considered to have probability 1.0 for the
- /// given row.
- ///
- /// Inputs are the logits, not probabilities.
- ///
- public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
- {
- var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
- int _idx = 0;
- var loss = op.outputs[_idx++];
- var backprop = op.outputs[_idx++];
- return (loss, backprop);
+ ///
+ /// Computes softmax cross entropy cost and gradients to backpropagate.
+ ///
+ ///
+ /// batch_size x num_classes matrix
+ ///
+ ///
+ /// batch_size vector with values in [0, num_classes).
+ /// This is the label for the given minibatch entry.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// loss : Per example loss (batch_size vector).
+ /// backprop : backpropagated gradients (batch_size x num_classes matrix).
+ /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept
+ /// a matrix of label probabilities, but rather a single label per row
+ /// of features. This label is considered to have probability 1.0 for the
+ /// given row.
+ ///
+ /// Inputs are the logits, not probabilities.
+ ///
+ public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
+ {
+ var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
+ int _idx = 0;
+ var loss = op.outputs[_idx++];
+ var backprop = op.outputs[_idx++];
+ return (loss, backprop);
}
- ///
- /// Computes rectified linear: `max(features, 0)`.
- ///
- /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
+ ///
+ /// Computes rectified linear: `max(features, 0)`.
+ ///
+ /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
/// A name for the operation (optional).
/// A `Tensor`. Has the same type as `features`.
public static Tensor relu(Tensor features, string name = null)
- {
-
- //_ctx = _context._context
- //if _ctx is not None and _ctx._eager_context.is_eager:
- // try:
- // _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
- // _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name,
- // _ctx._post_execution_callbacks, features)
- // return _result
- // except _core._FallbackException:
- // try:
- // return relu_eager_fallback(
- // features, name=name, ctx=_ctx)
- // except _core._SymbolicException:
- // pass # Add nodes to the TensorFlow graph.
- // except (TypeError, ValueError):
- // result = _dispatch.dispatch(
- // relu, features=features, name=name)
- // if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
- // return result
- // raise
- // except _core._NotOkStatusException as e:
- // if name is not None:
- // message = e.message + " name: " + name
- // else:
- // message = e.message
- // _six.raise_from(_core._status_to_exception(e.code, message), None)
+ {
+
+ //_ctx = _context._context
+ //if _ctx is not None and _ctx._eager_context.is_eager:
+ // try:
+ // _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
+ // _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name,
+ // _ctx._post_execution_callbacks, features)
+ // return _result
+ // except _core._FallbackException:
+ // try:
+ // return relu_eager_fallback(
+ // features, name=name, ctx=_ctx)
+ // except _core._SymbolicException:
+ // pass # Add nodes to the TensorFlow graph.
+ // except (TypeError, ValueError):
+ // result = _dispatch.dispatch(
+ // relu, features=features, name=name)
+ // if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ // return result
+ // raise
+ // except _core._NotOkStatusException as e:
+ // if name is not None:
+ // message = e.message + " name: " + name
+ // else:
+ // message = e.message
+ // _six.raise_from(_core._status_to_exception(e.code, message), None)
//# Add nodes to the TensorFlow graph.
//try:
OpDefLibrary _op_def_lib = new OpDefLibrary();
var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features });
- return _op.outputs[0];
+ return _op.outputs[0];
//except (TypeError, ValueError):
// result = _dispatch.dispatch(
// relu, features=features, name=name)
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Control.cs b/src/TensorFlowNET.Core/Operations/Operation.Control.cs
index ba7b0829..d6f73884 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Control.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Control.cs
@@ -1,68 +1,68 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using Tensorflow.Operations;
-using static Tensorflow.Binding;
-
-namespace Tensorflow
-{
- public partial class Operation
- {
- private ControlFlowContext _control_flow_context;
-
- ///
- /// Add this op to its control flow context.
- ///
- /// This may add new ops and change this op's inputs. self.inputs must be
- /// available before calling this method.
- ///
- public void _control_flow_post_processing()
- {
- foreach(Tensor input_tensor in inputs)
- control_flow_util.CheckInputFromValidContext(this, input_tensor.op);
-
- if (_control_flow_context != null)
- _control_flow_context.AddOp(this);
- }
-
- public void _add_control_input(Operation op)
- {
- //c_api.TF_AddControlInput(_operDesc, op);
- c_api.AddControlInput(graph, _handle, op);
- }
-
- public void _add_control_inputs(Operation[] ops)
- {
- foreach (var op in ops)
- _add_control_input(op);
- }
-
- public void _set_control_flow_context(ControlFlowContext ctx)
- {
- _control_flow_context = ctx;
- }
-
- public ControlFlowContext _get_control_flow_context()
- {
- return _control_flow_context;
- }
-
- public WhileContext GetWhileContext()
- {
- return _control_flow_context as WhileContext;
- }
- }
-}
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public partial class Operation
+ {
+ private ControlFlowContext _control_flow_context;
+
+ ///
+ /// Add this op to its control flow context.
+ ///
+ /// This may add new ops and change this op's inputs. self.inputs must be
+ /// available before calling this method.
+ ///
+ public void _control_flow_post_processing()
+ {
+ foreach(Tensor input_tensor in inputs)
+ control_flow_util.CheckInputFromValidContext(this, input_tensor.op);
+
+ if (_control_flow_context != null)
+ _control_flow_context.AddOp(this);
+ }
+
+ public void _add_control_input(Operation op)
+ {
+ //c_api.TF_AddControlInput(_operDesc, op);
+ c_api.AddControlInput(graph, _handle, op);
+ }
+
+ public void _add_control_inputs(Operation[] ops)
+ {
+ foreach (var op in ops)
+ _add_control_input(op);
+ }
+
+ public void _set_control_flow_context(ControlFlowContext ctx)
+ {
+ _control_flow_context = ctx;
+ }
+
+ public ControlFlowContext _get_control_flow_context()
+ {
+ return _control_flow_context;
+ }
+
+ public WhileContext GetWhileContext()
+ {
+ return _control_flow_context as WhileContext;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
index af3c57b2..fdf92504 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
@@ -1,109 +1,109 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Linq;
-using System.Runtime.InteropServices;
-#if SERIALIZABLE
-using Newtonsoft.Json;
-#endif
-
-namespace Tensorflow
-{
-
- // from ops.py
- public partial class Operation
- {
- public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index));
- public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index));
-
- public int InputListLength(string name)
- {
- int num = 0;
- using(var status = new Status())
- {
- num = c_api.TF_OperationInputListLength(_handle, name, status);
- status.Check(true);
- }
- return num;
- }
-#if SERIALIZABLE
- [JsonIgnore]
-#endif
- public int NumInputs => c_api.TF_OperationNumInputs(_handle);
- private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray();
-
- private InputList _inputs_val;
- public InputList inputs
- {
- get
- {
- if (_inputs_val == null)
- {
- var retval = new Tensor[NumInputs];
-
- for (int i = 0; i < NumInputs; i++)
- {
- var tf_output = Input(i);
- var op = GetOperation(tf_output.oper);
- retval[i] = op.outputs[tf_output.index];
- }
-
- _inputs_val = new InputList(retval);
- }
-
- return _inputs_val;
- }
- }
-
- public int NumControlInputs => c_api.TF_OperationNumControlInputs(_handle);
-
- ///
- /// The `Operation` objects on which this op has a control dependency.
- ///
- /// Before this op is executed, TensorFlow will ensure that the
- /// operations in `self.control_inputs` have finished executing.This
- /// mechanism can be used to run ops sequentially for performance
- /// reasons, or to ensure that the side effects of an op are observed
- /// in the correct order.
- ///
- public Operation[] control_inputs
- {
- get
- {
- return GetControlInputs();
- }
- }
-
- public unsafe Operation[] GetControlInputs()
- {
- var control_inputs = new Operation[NumControlInputs];
-
- if (NumControlInputs > 0)
- {
- IntPtr control_input_handle = Marshal.AllocHGlobal(Marshal.SizeOf() * NumControlInputs);
- c_api.TF_OperationGetControlInputs(_handle, control_input_handle, NumControlInputs);
- for (int i = 0; i < NumControlInputs; i++)
- {
- var handle = control_input_handle + Marshal.SizeOf() * i;
- control_inputs[i] = new Operation(*(IntPtr*)handle);
- }
- }
-
- return control_inputs;
- }
- }
-}
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Linq;
+using System.Runtime.InteropServices;
+#if SERIALIZABLE
+using Newtonsoft.Json;
+#endif
+
+namespace Tensorflow
+{
+
+ // from ops.py
+ public partial class Operation
+ {
+ public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index));
+ public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index));
+
+ public int InputListLength(string name)
+ {
+ int num = 0;
+ using(var status = new Status())
+ {
+ num = c_api.TF_OperationInputListLength(_handle, name, status);
+ status.Check(true);
+ }
+ return num;
+ }
+#if SERIALIZABLE
+ [JsonIgnore]
+#endif
+ public int NumInputs => c_api.TF_OperationNumInputs(_handle);
+ private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray();
+
+ private InputList _inputs_val;
+ public InputList inputs
+ {
+ get
+ {
+ if (_inputs_val == null)
+ {
+ var retval = new Tensor[NumInputs];
+
+ for (int i = 0; i < NumInputs; i++)
+ {
+ var tf_output = Input(i);
+ var op = GetOperation(tf_output.oper);
+ retval[i] = op.outputs[tf_output.index];
+ }
+
+ _inputs_val = new InputList(retval);
+ }
+
+ return _inputs_val;
+ }
+ }
+
+ public int NumControlInputs => c_api.TF_OperationNumControlInputs(_handle);
+
+ ///
+ /// The `Operation` objects on which this op has a control dependency.
+ ///
+ /// Before this op is executed, TensorFlow will ensure that the
+ /// operations in `self.control_inputs` have finished executing.This
+ /// mechanism can be used to run ops sequentially for performance
+ /// reasons, or to ensure that the side effects of an op are observed
+ /// in the correct order.
+ ///
+ public Operation[] control_inputs
+ {
+ get
+ {
+ return GetControlInputs();
+ }
+ }
+
+ public unsafe Operation[] GetControlInputs()
+ {
+ var control_inputs = new Operation[NumControlInputs];
+
+ if (NumControlInputs > 0)
+ {
+ IntPtr control_input_handle = Marshal.AllocHGlobal(Marshal.SizeOf() * NumControlInputs);
+ c_api.TF_OperationGetControlInputs(_handle, control_input_handle, NumControlInputs);
+ for (int i = 0; i < NumControlInputs; i++)
+ {
+ var handle = control_input_handle + Marshal.SizeOf() * i;
+ control_inputs[i] = new Operation(*(IntPtr*)handle);
+ }
+ }
+
+ return control_inputs;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index 359dc870..831e6ca5 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -1,27 +1,27 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using Google.Protobuf.Collections;
#if SERIALIZABLE
-using Newtonsoft.Json;
-#endif
+using Newtonsoft.Json;
+#endif
using System;
using System.Collections.Generic;
using System.IO;
-using System.Linq;
+using System.Linq;
using Tensorflow.Util;
using static Tensorflow.Binding;
@@ -47,26 +47,26 @@ namespace Tensorflow
///
public partial class Operation : ITensorOrOperation
{
- private readonly IntPtr _handle; // _c_op in python
+ private readonly IntPtr _handle; // _c_op in python
private readonly Graph _graph;
- private NodeDef _node_def;
+ private NodeDef _node_def;
#if SERIALIZABLE
[JsonIgnore]
#endif
- public string type => OpType;
+ public string type => OpType;
#if SERIALIZABLE
[JsonIgnore]
#endif
- public Graph graph => _graph;
+ public Graph graph => _graph;
#if SERIALIZABLE
[JsonIgnore]
#endif
- public int _id => _id_value;
+ public int _id => _id_value;
#if SERIALIZABLE
[JsonIgnore]
#endif
- public int _id_value { get; set; }
+ public int _id_value { get; set; }
#if SERIALIZABLE
[JsonIgnore]
#endif
@@ -74,11 +74,11 @@ namespace Tensorflow
public TF_DataType dtype => TF_DataType.DtInvalid;
public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle));
public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle));
- public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle));
+ public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle));
#if SERIALIZABLE
[JsonIgnore]
#endif
- bool _is_stateful;
+ bool _is_stateful;
#if SERIALIZABLE
[JsonIgnore]
#endif
@@ -176,17 +176,17 @@ namespace Tensorflow
}
}
- _id_value = _graph._next_id();
-
+ _id_value = _graph._next_id();
+
// Dict mapping op name to file and line information for op colocation
// context managers.
- _control_flow_context = graph._get_control_flow_context();
-
+ _control_flow_context = graph._get_control_flow_context();
+
// This will be set by self.inputs.
if (op_def == null)
- op_def = g.GetOpDef(node_def.Op);
-
- var grouped_inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.Attr);
+ op_def = g.GetOpDef(node_def.Op);
+
+ var grouped_inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.Attr);
_handle = ops._create_c_op(g, node_def, grouped_inputs, control_input_ops.ToArray());
_is_stateful = op_def.IsStateful;
diff --git a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
index 20dc0f26..f9571a8a 100644
--- a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
+++ b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
@@ -1,21 +1,21 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
******************************************************************************/
using System;
-using System.Linq;
+using System.Linq;
using Tensorflow.Operations;
using static Tensorflow.Binding;
@@ -31,26 +31,26 @@ namespace Tensorflow
public static bool IsLoopExit(Operation op)
{
return op.type == "Exit" || op.type == "RefExit";
- }
-
- ///
- /// Returns true if `op` is an Enter.
- ///
- ///
- ///
+ }
+
+ ///
+ /// Returns true if `op` is an Enter.
+ ///
+ ///
+ ///
public static bool IsLoopEnter(Operation op)
{
return op.type == "Enter" || op.type == "RefEnter";
}
- ///
- /// Return true iff op is a loop invariant.
- ///
- ///
+ ///
+ /// Return true iff op is a loop invariant.
+ ///
+ ///
///
- public static bool IsLoopConstantEnter(Operation op)
- {
- return IsLoopEnter(op) && op.get_attr("is_constant");
+ public static bool IsLoopConstantEnter(Operation op)
+ {
+ return IsLoopEnter(op) && op.get_attr("is_constant");
}
///
@@ -61,141 +61,141 @@ namespace Tensorflow
public static bool IsSwitch(Operation op)
{
return op.type == "Switch" || op.type == "RefSwitch";
- }
-
- public static WhileContext GetWhileContext(Operation op)
+ }
+
+ public static WhileContext GetWhileContext(Operation op)
=> op.GetWhileContext();
public static bool IsCondSwitch(Operation op)
- {
- if (!IsSwitch(op))
- return false;
- if (op.outputs == null || op.outputs.Length == 0)
- return false;
-
+ {
+ if (!IsSwitch(op))
+ return false;
+ if (op.outputs == null || op.outputs.Length == 0)
+ return false;
+
// Switch nodes are not part of the cond control flow context that they
// represent, so consider the consumers of its outputs to determine if it is
// cond switch or not. A switch is a cond switch iff all its consumers are in
- // cond contexts.
- var is_cond_switch = true;
- foreach(var o in op.outputs)
- {
- foreach(var c in o.consumers())
- {
- var ctxt = c._get_control_flow_context();
- if (IsLoopEnter(c))
- ctxt = ctxt.outer_context;
- is_cond_switch = is_cond_switch &&(ctxt != null && ctxt.IsCondContext());
- }
- }
-
- return is_cond_switch;
+ // cond contexts.
+ var is_cond_switch = true;
+ foreach(var o in op.outputs)
+ {
+ foreach(var c in o.consumers())
+ {
+ var ctxt = c._get_control_flow_context();
+ if (IsLoopEnter(c))
+ ctxt = ctxt.outer_context;
+ is_cond_switch = is_cond_switch &&(ctxt != null && ctxt.IsCondContext());
+ }
+ }
+
+ return is_cond_switch;
}
- public static bool IsLoopSwitch(Operation op)
- {
- if (IsSwitch(op))
- {
- var ctxt = op._get_control_flow_context();
- return ctxt != null && ctxt.IsWhileContext() && !IsCondSwitch(op);
- }
- return false;
+ public static bool IsLoopSwitch(Operation op)
+ {
+ if (IsSwitch(op))
+ {
+ var ctxt = op._get_control_flow_context();
+ return ctxt != null && ctxt.IsWhileContext() && !IsCondSwitch(op);
+ }
+ return false;
}
- ///
- /// Return the control flow context for the output of an op.
- ///
+ ///
+ /// Return the control flow context for the output of an op.
+ ///
public static ControlFlowContext GetOutputContext(Operation op)
{
var ctxt = op._get_control_flow_context();
- // Exit nodes usually have a control flow context, except in the case where the
- // exit node was imported via import_graph_def (in which case no nodes have
+ // Exit nodes usually have a control flow context, except in the case where the
+ // exit node was imported via import_graph_def (in which case no nodes have
// control flow contexts).
if (ctxt != null && IsLoopExit(op))
ctxt = ctxt.outer_context;
return ctxt;
}
- public static void CheckInputFromValidContext(Operation op, Operation input_op)
- {
- var op_ctxt = op._get_control_flow_context();
- var input_ctxt = GetOutputContext(input_op);
- var valid = false;
- if (input_ctxt == null)
- valid = true;
- else if (op_ctxt == input_ctxt)
- valid = true;
- else
- {
- var while_ctxt = GetContainingWhileContext(op_ctxt);
- var input_while_ctxt = GetContainingWhileContext(input_ctxt);
-
- if (while_ctxt == null)
- {
+ public static void CheckInputFromValidContext(Operation op, Operation input_op)
+ {
+ var op_ctxt = op._get_control_flow_context();
+ var input_ctxt = GetOutputContext(input_op);
+ var valid = false;
+ if (input_ctxt == null)
+ valid = true;
+ else if (op_ctxt == input_ctxt)
+ valid = true;
+ else
+ {
+ var while_ctxt = GetContainingWhileContext(op_ctxt);
+ var input_while_ctxt = GetContainingWhileContext(input_ctxt);
+
+ if (while_ctxt == null)
+ {
// Neither op nor input_op is in a while loop, but one or both are in
// conds. We allow this, although execution will fail if the branch
- // corresponding to input_op's cond context isn't taken.
- if (input_while_ctxt == null)
- valid = true;
- // Invalid if op isn't in a while loop and input_op is. Unless...
- if (IsLoopEnter(op))
- // WhileContext._BuildLoop clears context for Enter nodes.
- valid = true;
- if (IsSwitch(op))
- // CondContext.AddValue clears context for Switch nodes.
- valid = true;
- }
- else if (IsContainingContext(while_ctxt, input_while_ctxt))
- {
- // input_op is in a while loop which contains op's while loop (or not in a
- // while loop at all).
- valid = true;
- }
- else if (while_ctxt.grad_state != null &&
+ // corresponding to input_op's cond context isn't taken.
+ if (input_while_ctxt == null)
+ valid = true;
+ // Invalid if op isn't in a while loop and input_op is. Unless...
+ if (IsLoopEnter(op))
+ // WhileContext._BuildLoop clears context for Enter nodes.
+ valid = true;
+ if (IsSwitch(op))
+ // CondContext.AddValue clears context for Switch nodes.
+ valid = true;
+ }
+ else if (IsContainingContext(while_ctxt, input_while_ctxt))
+ {
+ // input_op is in a while loop which contains op's while loop (or not in a
+ // while loop at all).
+ valid = true;
+ }
+ else if (while_ctxt.grad_state != null &&
IsContainingContext(while_ctxt.grad_state.forward_context,
- input_while_ctxt))
- {
- valid = true;
- }
- else
- throw new NotImplementedException("CheckInputFromValidContext");
- }
-
- if (!valid)
- {
- throw new NotImplementedException("CheckInputFromValidContext");
- }
- }
-
- public static Operation GetLoopConstantEnter(Tensor value)
- {
- var id_ops = new string[] { "Switch", "RefSwitch", "Identity", "RefIdentity" };
- var op = value.op;
- while (id_ops.Contains(op.type))
- op = op.inputs[0].op;
- return IsLoopConstantEnter(op) ? op : null;
+ input_while_ctxt))
+ {
+ valid = true;
+ }
+ else
+ throw new NotImplementedException("CheckInputFromValidContext");
+ }
+
+ if (!valid)
+ {
+ throw new NotImplementedException("CheckInputFromValidContext");
+ }
}
- public static bool IsContainingContext(WhileContext ctxt, WhileContext maybe_containing_ctxt)
- {
- while(ctxt != maybe_containing_ctxt)
- {
- if (ctxt == null)
- return false;
- ctxt = ctxt.outer_context as WhileContext;
- }
- return true;
+ public static Operation GetLoopConstantEnter(Tensor value)
+ {
+ var id_ops = new string[] { "Switch", "RefSwitch", "Identity", "RefIdentity" };
+ var op = value.op;
+ while (id_ops.Contains(op.type))
+ op = op.inputs[0].op;
+ return IsLoopConstantEnter(op) ? op : null;
}
- public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, ControlFlowContext stop_ctxt = null)
- {
- while (ctxt != null)
- {
- if (ctxt.IsWhileContext() || ctxt == stop_ctxt)
- return ctxt as WhileContext;
- ctxt = ctxt.outer_context;
- }
- return null;
+ public static bool IsContainingContext(WhileContext ctxt, WhileContext maybe_containing_ctxt)
+ {
+ while(ctxt != maybe_containing_ctxt)
+ {
+ if (ctxt == null)
+ return false;
+ ctxt = ctxt.outer_context as WhileContext;
+ }
+ return true;
+ }
+
+ public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, ControlFlowContext stop_ctxt = null)
+ {
+ while (ctxt != null)
+ {
+ if (ctxt.IsWhileContext() || ctxt == stop_ctxt)
+ return ctxt as WhileContext;
+ ctxt = ctxt.outer_context;
+ }
+ return null;
}
}
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index 62b0f1b4..5cf240e8 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -1,735 +1,735 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using static Tensorflow.Binding;
-
-namespace Tensorflow
-{
- public static class gen_math_ops
- {
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
- public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
-
- return _op.outputs[0];
- }
-
- ///
- /// Add all input tensors element wise.
- ///
- ///
- ///
- ///
- public static Tensor add_n(Tensor[] inputs, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns the index with the largest value across dimensions of a tensor.
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
- => _op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).outputs[0];
-
- ///
- /// Returns the index with the smallest value across dimensions of a tensor.
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null)
- =>_op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
-
- ///
- /// Computes Psi, the derivative of Lgamma (the log of the absolute value of
- /// `Gamma(x)`), element-wise.
- ///
- ///
- ///
- ///
- public static Tensor digamma(Tensor x, string name = null)
- => _op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output;
-
- ///
- /// Returns 0 if the denominator is zero.
- ///
- ///
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- ///
- /// *NOTE*: DivNoNan supports broadcasting. More about broadcasting
- /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
- ///
- public static Tensor div_no_nan(Tensor x, Tensor y, string name = null)
- {
- var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
- return op.output;
- }
-
- ///
- /// Computes the mean of elements across dimensions of a tensor.
- /// Reduces `input` along the dimensions given in `axis`. Unless
- /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
- /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
- ///
- /// A `Tensor`. Must be one of the following types:
- /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
- /// The tensor to reduce.
- /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce.
- /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `input`.
- public static Tensor mean(T1 input, T2 axis, bool keep_dims= false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
-
- return _op.output;
- }
-
- public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
-
- return _op.outputs[0];
- }
-
- public static Tensor acos(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Acos", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor asin(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Asin", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor add(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
-
- return _op.output;
- }
-
- public static Tensor atan(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Atan", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor ceil(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Ceil", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor sin(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes sigmoid of x element-wise.
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// Specifically, y = 1 / (1 + exp(-x)).
- ///
- public static Tensor sigmoid(Tensor x, string name = "Sigmoid")
- {
- var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
-
- return op.output;
- }
-
- ///
- /// Computes the gradient of the sigmoid of x wrt its input.
- ///
- ///
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and
- /// dy is the corresponding input gradient.
- ///
- public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad")
- {
- var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
-
- return op.outputs[0];
- }
-
- public static Tensor sign(Tensor x, string name = "Sign")
- {
- var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
-
- return op.outputs[0];
- }
-
- public static Tensor sinh(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor cos(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Cos", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor cosh(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Cosh", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor cumsum(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes the sum along segments of a tensor.
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
- return _op.outputs[0];
- }
-
- public static Tensor tan(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor tanh(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Tanh", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes the gradient for the tanh of `x` wrt its input.
- ///
- ///
- ///
- ///
- ///
- public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null)
- => _op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
-
- public static Tensor floor(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Floor", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
-
- return _op.outputs[0];
- }
-
- public static Tensor greater(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes the log of the absolute value of `Gamma(x)` element-wise.
- ///
- ///
- /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
- ///
- ///
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- public static Tensor lgamma(Tensor x, string name = null)
- {
- var op = _op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x });
-
- return op.output;
- }
-
- public static Tensor greater_equal(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor less(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor less_equal(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor log1p(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Log1p", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor logical_and(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor logical_not(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor logical_or(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
- {
- return logical_and(
- logical_or(x, y),
- logical_not(logical_and(x, y)),
- name);
- }
-
- public static Tensor squared_difference(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes square of x element-wise.
- ///
- /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `x`.
- public static Tensor square(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns which elements of x are finite.
- ///
- /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
- /// A name for the operation (optional).
- /// A `Tensor` of type `bool`.
- public static Tensor is_finite(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("IsFinite", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor is_nan(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes exponential of x element-wise. \\(y = e^x\\).
- ///
- /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `x`.
- public static Tensor exp(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Exp", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Computes natural logarithm of x element-wise.
- ///
- /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
- /// name: A name for the operation (optional).
- /// A `Tensor`. Has the same type as `x`.
- public static Tensor log(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Log", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "")
- {
- var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
-
- return _op.outputs[0];
- }
-
- public static Tensor neg(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor sqrt(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor sub(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns the truth value of (x == y) element-wise.
- ///
- ///
- ///
- ///
- ///
- public static Tensor equal(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns the truth value of (x != y) element-wise.
- ///
- /// The type of the x.
- /// The type of the y.
- /// The x.
- /// The y.
- /// The name.
- ///
- public static Tensor not_equal(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
-
- public static Tensor atan2(Tensor y, Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x });
-
- return _op.outputs[0];
- }
-
- public static Tensor mul(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor mul_no_nan(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor real_div(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor reciprocal(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Reciprocal", name, args: new { x });
-
- return _op.outputs[0];
- }
-
- public static Tensor floor_mod(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor floor_div(Tensor x, Tensor y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- ///
- /// Multiply the matrix "a" by the matrix "b".
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
-
- return _op.output;
- }
-
- ///
- /// Multiply slices of the two matrices "x" and "y".
- ///
- ///
- /// The `BatchMatMul` operation is embedded into the
- /// `MatMul` operation on the DLL side. However the expected
- /// attributes are not the same, hence we need to expose this
- /// method to have the right args list on the `_apply_op_helper`
- /// function.
- ///
- /// For each rank > 2 the first rank - 2 dimensions are considered
- /// as fixed, and have to be consistent across the two matrices. A
- /// common matrix multiplication is then applied over the residual
- /// 2 dimensions.
- ///
- /// e.g.
- /// x is (3, 6, 12); y is (3, 12, 6)
- /// batch_matmul(x, y) ==> (3, 6, 6)
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper(
- "BatchMatMul",
- name,
- args: new { x, y, adj_x, adj_y });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
- ///
- ///
- ///
- ///
- ///
- public static Tensor maximum(T1 x, T2 y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor minimum(T1 x, T2 y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Minimum", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor _abs(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Abs", name, args: new { x });
-
- return _op.output;
- }
-
- public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
-
- return _op.outputs[0];
- }
-
- public static Tensor _max(Tx input, Ty axis, bool keep_dims=false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
-
- return _op.outputs[0];
- }
-
- public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
-
- return _op.outputs[0];
- }
-
- public static Tensor pow(Tx x, Ty y, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y });
-
- return _op.outputs[0];
- }
-
- public static Tensor _sum(Tx input, Ty axis = default, bool keep_dims = false, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
-
- return _op.outputs[0];
- }
-
- ///
- /// Creates a sequence of numbers.
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
-
- return _op.outputs[0];
- }
-
- ///
- /// Rounds the values of a tensor to the nearest integer, element-wise.
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// Rounds half to even. Also known as bankers rounding. If you want to round
- /// according to the current system rounding mode use std::cint.
- ///
- public static Tensor round(Tensor x, string name = "Round")
- {
- var op = _op_def_lib._apply_op_helper("Round", name: name, new { x });
-
- return op.output;
- }
-
- ///
- /// Computes reciprocal of square root of x element-wise.
- ///
- ///
- ///
- ///
- public static Tensor rsqrt(Tensor x, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("Rsqrt", name, new { x });
-
- return _op.outputs[0];
- }
-
- ///
- /// Returns the fraction of zeros in value.
- ///
- /// A tensor of numeric type.
- /// A name for the operation (optional).
- /// The fraction of zeros in value, with type float32.
- public static Tensor zero_fraction(Tensor value, string name = null)
- {
- var _op = _op_def_lib._apply_op_helper("zero_fraction", name, new { value, name });
-
- return _op.outputs[0];
- }
- }
-}
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public static class gen_math_ops
+ {
+ public static OpDefLibrary _op_def_lib = new OpDefLibrary();
+
+ public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Add all input tensors element wise.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor add_n(Tensor[] inputs, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Returns the index with the largest value across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
+ => _op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).outputs[0];
+
+ ///
+ /// Returns the index with the smallest value across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null)
+ =>_op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
+
+ ///
+ /// Computes Psi, the derivative of Lgamma (the log of the absolute value of
+ /// `Gamma(x)`), element-wise.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor digamma(Tensor x, string name = null)
+ => _op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output;
+
+ ///
+ /// Returns 0 if the denominator is zero.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ ///
+ /// *NOTE*: DivNoNan supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ ///
+ public static Tensor div_no_nan(Tensor x, Tensor y, string name = null)
+ {
+ var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
+ return op.output;
+ }
+
+ ///
+ /// Computes the mean of elements across dimensions of a tensor.
+ /// Reduces `input` along the dimensions given in `axis`. Unless
+ /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+ /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
+ ///
+ /// A `Tensor`. Must be one of the following types:
+ /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
+ /// The tensor to reduce.
+ /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce.
+ /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1.
+ /// A name for the operation (optional).
+ /// A `Tensor`. Has the same type as `input`.
+ public static Tensor mean(T1 input, T2 axis, bool keep_dims= false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+
+ return _op.output;
+ }
+
+ public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor acos(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Acos", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor asin(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Asin", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor add(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+
+ return _op.output;
+ }
+
+ public static Tensor atan(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Atan", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor ceil(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Ceil", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor sin(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Computes sigmoid of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Specifically, y = 1 / (1 + exp(-x)).
+ ///
+ public static Tensor sigmoid(Tensor x, string name = "Sigmoid")
+ {
+ var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
+
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradient of the sigmoid of x wrt its input.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and
+ /// dy is the corresponding input gradient.
+ ///
+ public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad")
+ {
+ var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
+
+ return op.outputs[0];
+ }
+
+ public static Tensor sign(Tensor x, string name = "Sign")
+ {
+ var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
+
+ return op.outputs[0];
+ }
+
+ public static Tensor sinh(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor cos(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Cos", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor cosh(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Cosh", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor cumsum(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Computes the sum along segments of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
+ return _op.outputs[0];
+ }
+
+ public static Tensor tan(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor tanh(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Tanh", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Computes the gradient for the tanh of `x` wrt its input.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null)
+ => _op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
+
+ public static Tensor floor(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Floor", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor greater(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ ///
+ /// Computes the log of the absolute value of `Gamma(x)` element-wise.
+ ///
+ ///
+ /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor lgamma(Tensor x, string name = null)
+ {
+ var op = _op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x });
+
+ return op.output;
+ }
+
+ public static Tensor greater_equal(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor less(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor less_equal(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor log1p(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Log1p", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor logical_and(Tensor x, Tensor y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor logical_not(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor logical_or(Tensor x, Tensor y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
+ {
+ return logical_and(
+ logical_or(x, y),
+ logical_not(logical_and(x, y)),
+ name);
+ }
+
+ /// <summary>
+ /// Returns (x - y)(x - y) element-wise.
+ /// </summary>
+ /// <param name="x">The left operand.</param>
+ /// <param name="y">The right operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor squared_difference(Tensor x, Tensor y, string name = null)
+ {
+ // Fix: `name` was previously duplicated into the args object; it is the op name,
+ // not an op input/attribute, and no sibling op passes it in args.
+ var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes square of x element-wise.
+ /// </summary>
+ /// <param name="x">A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor square(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns which elements of x are finite.
+ /// </summary>
+ /// <param name="x">A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` of type `bool`.</returns>
+ public static Tensor is_finite(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("IsFinite", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns which elements of x are NaN ("IsNan" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` of type `bool`.</returns>
+ public static Tensor is_nan(Tensor x, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x }).outputs[0];
+ }
+
+ /// <summary>
+ /// Computes exponential of x element-wise. \\(y = e^x\\).
+ /// </summary>
+ /// <param name="x">A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor exp(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Exp", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes natural logarithm of x element-wise.
+ /// </summary>
+ /// <param name="x">A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor log(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Log", name, args: new { x });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Casts x to the destination type `DstT` ("Cast" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="DstT">The destination data type.</param>
+ /// <param name="Truncate">If true, truncating cast is requested (passed through as an op attribute).</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` of type `DstT`.</returns>
+ public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string name = null)
+ {
+ // Fix: default name was "" while every sibling op uses null; an empty string
+ // would be forwarded as an explicit (empty) op name instead of "use default".
+ var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes numerical negative value element-wise ("Neg" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor neg(Tensor x, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("Neg", name, args: new { x }).outputs[0];
+ }
+
+ /// <summary>
+ /// Computes square root of x element-wise ("Sqrt" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor sqrt(Tensor x, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("Sqrt", name, args: new { x }).outputs[0];
+ }
+
+ /// <summary>
+ /// Returns x - y element-wise.
+ /// </summary>
+ /// <typeparam name="Tx">Type of the left operand.</typeparam>
+ /// <typeparam name="Ty">Type of the right operand.</typeparam>
+ /// <param name="x">The left operand.</param>
+ /// <param name="y">The right operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns the truth value of (x == y) element-wise.
+ /// </summary>
+ /// <typeparam name="Tx">Type of the left operand.</typeparam>
+ /// <typeparam name="Ty">Type of the right operand.</typeparam>
+ /// <param name="x">The left operand.</param>
+ /// <param name="y">The right operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` of type `bool`.</returns>
+ public static Tensor equal<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns the truth value of (x != y) element-wise.
+ /// </summary>
+ /// <typeparam name="Tx">The type of the x.</typeparam>
+ /// <typeparam name="Ty">The type of the y.</typeparam>
+ /// <param name="x">The x.</param>
+ /// <param name="y">The y.</param>
+ /// <param name="name">The name.</param>
+ /// <returns>A `Tensor` of type `bool`.</returns>
+ public static Tensor not_equal<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+
+ /// <summary>
+ /// Computes arctangent of y/x element-wise, respecting signs of the arguments ("Atan2" op).
+ /// </summary>
+ /// <param name="y">The numerator tensor.</param>
+ /// <param name="x">The denominator tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `y`.</returns>
+ public static Tensor atan2(Tensor y, Tensor x, string name = null)
+ {
+ // Note: the op takes (y, x), matching the conventional atan2(y, x) argument order.
+ return _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x }).outputs[0];
+ }
+
+ /// <summary>
+ /// Returns x * y element-wise.
+ /// </summary>
+ /// <typeparam name="Tx">Type of the left operand.</typeparam>
+ /// <typeparam name="Ty">Type of the right operand.</typeparam>
+ /// <param name="x">The left operand.</param>
+ /// <param name="y">The right operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor mul<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns x * y element-wise; the "MulNoNan" op variant of multiply.
+ /// </summary>
+ /// <typeparam name="Tx">Type of the left operand.</typeparam>
+ /// <typeparam name="Ty">Type of the right operand.</typeparam>
+ /// <param name="x">The left operand.</param>
+ /// <param name="y">The right operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor mul_no_nan<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns x / y element-wise for real types ("RealDiv" op).
+ /// </summary>
+ /// <param name="x">The numerator tensor.</param>
+ /// <param name="y">The denominator tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor real_div(Tensor x, Tensor y, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }).outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the reciprocal of x element-wise ("Reciprocal" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor reciprocal(Tensor x, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("Reciprocal", name, args: new { x }).outputs[0];
+ }
+
+ /// <summary>
+ /// Returns element-wise remainder of division ("FloorMod" op).
+ /// </summary>
+ /// <param name="x">The dividend tensor.</param>
+ /// <param name="y">The divisor tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor floor_mod(Tensor x, Tensor y, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }).outputs[0];
+ }
+
+ /// <summary>
+ /// Returns x // y element-wise ("FloorDiv" op).
+ /// </summary>
+ /// <param name="x">The dividend tensor.</param>
+ /// <param name="y">The divisor tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor floor_div(Tensor x, Tensor y, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }).outputs[0];
+ }
+
+ /// <summary>
+ /// Multiply the matrix "a" by the matrix "b".
+ /// </summary>
+ /// <param name="a">The first factor.</param>
+ /// <param name="b">The second factor.</param>
+ /// <param name="transpose_a">If true, "a" is transposed before multiplication (op attribute).</param>
+ /// <param name="transpose_b">If true, "b" is transposed before multiplication (op attribute).</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` holding the matrix product.</returns>
+ public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
+
+ return _op.output;
+ }
+
+ /// <summary>
+ /// Multiply slices of the two matrices "x" and "y".
+ /// </summary>
+ /// <remarks>
+ /// The `BatchMatMul` operation is embedded into the
+ /// `MatMul` operation on the DLL side. However the expected
+ /// attributes are not the same, hence we need to expose this
+ /// method to have the right args list on the `_apply_op_helper`
+ /// function.
+ ///
+ /// For each rank &gt; 2 the first rank - 2 dimensions are considered
+ /// as fixed, and have to be consistent across the two matrices. A
+ /// common matrix multiplication is then applied over the residual
+ /// 2 dimensions.
+ ///
+ /// e.g.
+ /// x is (3, 6, 12); y is (3, 12, 6)
+ /// batch_matmul(x, y) ==&gt; (3, 6, 6)
+ /// </remarks>
+ /// <param name="x">The first batch of matrices.</param>
+ /// <param name="y">The second batch of matrices.</param>
+ /// <param name="adj_x">If true, adjoint the slices of "x" (op attribute).</param>
+ /// <param name="adj_y">If true, adjoint the slices of "y" (op attribute).</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor` holding the batched matrix product.</returns>
+ public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper(
+ "BatchMatMul",
+ name,
+ args: new { x, y, adj_x, adj_y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+ /// </summary>
+ /// <typeparam name="T1">Type of the first operand.</typeparam>
+ /// <typeparam name="T2">Type of the second operand.</typeparam>
+ /// <param name="x">The first operand.</param>
+ /// <param name="y">The second operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor maximum<T1, T2>(T1 x, T2 y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns the min of x and y (i.e. x &lt; y ? x : y) element-wise.
+ /// </summary>
+ /// <typeparam name="T1">Type of the first operand.</typeparam>
+ /// <typeparam name="T2">Type of the second operand.</typeparam>
+ /// <param name="x">The first operand.</param>
+ /// <param name="y">The second operand.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor minimum<T1, T2>(T1 x, T2 y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Minimum", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the absolute value of x element-wise ("Abs" op).
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor _abs(Tensor x, string name = null)
+ {
+ return _op_def_lib._apply_op_helper("Abs", name, args: new { x }).output;
+ }
+
+ /// <summary>
+ /// Computes the "logical or" of elements across dimensions of a tensor ("Any" op).
+ /// </summary>
+ /// <typeparam name="Tx">Type of the input.</typeparam>
+ /// <typeparam name="Ty">Type of the reduction axis argument.</typeparam>
+ /// <param name="input">The tensor to reduce.</param>
+ /// <param name="axis">The dimensions to reduce (forwarded as `reduction_indices`).</param>
+ /// <param name="keep_dims">If true, retains reduced dimensions with length 1.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>The reduced `Tensor` of type `bool`.</returns>
+ public static Tensor _any<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the maximum of elements across dimensions of a tensor ("Max" op).
+ /// </summary>
+ /// <typeparam name="Tx">Type of the input.</typeparam>
+ /// <typeparam name="Ty">Type of the reduction axis argument.</typeparam>
+ /// <param name="input">The tensor to reduce.</param>
+ /// <param name="axis">The dimensions to reduce (forwarded as `reduction_indices`).</param>
+ /// <param name="keep_dims">If true, retains reduced dimensions with length 1.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>The reduced `Tensor`. Has the same type as `input`.</returns>
+ public static Tensor _max<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the minimum of elements across dimensions of a tensor ("Min" op).
+ /// </summary>
+ /// <typeparam name="Tx">Type of the input.</typeparam>
+ /// <typeparam name="Ty">Type of the reduction axis argument.</typeparam>
+ /// <param name="input">The tensor to reduce.</param>
+ /// <param name="axis">The dimensions to reduce (forwarded as `reduction_indices`).</param>
+ /// <param name="keep_dims">If true, retains reduced dimensions with length 1.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>The reduced `Tensor`. Has the same type as `input`.</returns>
+ public static Tensor _min<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the power of one value to another ("Pow" op).
+ /// </summary>
+ /// <typeparam name="Tx">Type of the base.</typeparam>
+ /// <typeparam name="Ty">Type of the exponent.</typeparam>
+ /// <param name="x">The base.</param>
+ /// <param name="y">The exponent.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Computes the sum of elements across dimensions of a tensor ("Sum" op).
+ /// </summary>
+ /// <typeparam name="Tx">Type of the input.</typeparam>
+ /// <typeparam name="Ty">Type of the reduction axis argument.</typeparam>
+ /// <param name="input">The tensor to reduce.</param>
+ /// <param name="axis">The dimensions to reduce (forwarded as `reduction_indices`).</param>
+ /// <param name="keep_dims">If true, retains reduced dimensions with length 1.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>The reduced `Tensor`. Has the same type as `input`.</returns>
+ public static Tensor _sum<Tx, Ty>(Tx input, Ty axis = default, bool keep_dims = false, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Creates a sequence of numbers.
+ /// </summary>
+ /// <param name="start">First entry of the sequence.</param>
+ /// <param name="limit">Exclusive upper bound of the sequence.</param>
+ /// <param name="delta">Increment between consecutive entries.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A 1-D `Tensor` holding the generated sequence.</returns>
+ public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Rounds the values of a tensor to the nearest integer, element-wise.
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'.
+ /// </param>
+ /// <returns>
+ /// A `Tensor`. Has the same type as `x`.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// </returns>
+ /// <remarks>
+ /// Rounds half to even. Also known as bankers rounding. If you want to round
+ /// according to the current system rounding mode use std::cint.
+ /// </remarks>
+ public static Tensor round(Tensor x, string name = "Round")
+ {
+ // Consistency fix: use the `_op` naming and the labeled `args:` argument style
+ // used by every other wrapper in this class (was `name: name, new { x }`).
+ var _op = _op_def_lib._apply_op_helper("Round", name, args: new { x });
+
+ return _op.output;
+ }
+
+ /// <summary>
+ /// Computes reciprocal of square root of x element-wise.
+ /// </summary>
+ /// <param name="x">The input tensor.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>A `Tensor`. Has the same type as `x`.</returns>
+ public static Tensor rsqrt(Tensor x, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Rsqrt", name, new { x });
+
+ return _op.outputs[0];
+ }
+
+ /// <summary>
+ /// Returns the fraction of zeros in value.
+ /// </summary>
+ /// <param name="value">A tensor of numeric type.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ /// <returns>The fraction of zeros in value, with type float32.</returns>
+ public static Tensor zero_fraction(Tensor value, string name = null)
+ {
+ // Fix: `name` was duplicated into the args object; it is only the op name.
+ // NOTE(review): "zero_fraction" does not follow the CamelCase registry naming used
+ // by every other op in this class ("ZeroFraction"); in upstream TensorFlow this is a
+ // Python-level composition rather than a registered kernel — confirm the op exists.
+ var _op = _op_def_lib._apply_op_helper("zero_fraction", name, new { value });
+
+ return _op.outputs[0];
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Sessions/FeedDict.cs b/src/TensorFlowNET.Core/Sessions/FeedDict.cs
index a95b5db1..f39a761d 100644
--- a/src/TensorFlowNET.Core/Sessions/FeedDict.cs
+++ b/src/TensorFlowNET.Core/Sessions/FeedDict.cs
@@ -1,8 +1,8 @@
-using System.Collections;
-
-namespace Tensorflow.Sessions
-{
- public class FeedDict : Hashtable
- {
- }
-}
+using System.Collections;
+
+namespace Tensorflow.Sessions
+{
+ public class FeedDict : Hashtable
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs
index 54149fe1..3f5d78eb 100644
--- a/src/TensorFlowNET.Core/Util/nest.py.cs
+++ b/src/TensorFlowNET.Core/Util/nest.py.cs
@@ -1,987 +1,987 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Collections;
-using System.Collections.Generic;
-using System.Linq;
-using NumSharp;
-using Tensorflow.Operations;
-
-namespace Tensorflow.Util
-{
- //Functions for working with arbitrarily nested sequences of elements.
-
- //This module can perform operations on nested structures. A nested structure is a
- //Python sequence, tuple (including `namedtuple`), or dict that can contain
- //further sequences, tuples, and dicts.
-
- //The utilities here assume (and do not check) that the nested structures form a
- //'tree', i.e., no references in the structure of the input of these functions
- //should be recursive.
-
- //Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
- // (np.array([3, 4]), tf.constant([3, 4])))`
- //
-
- public static class nest
- {
-
-
- ///
- /// Untyped implementation of zip for arbitrary data
- ///
- /// Converts an list of lists or arrays [[1,2,3], [4,5,6], [7,8,9]] into a list of arrays
- /// representing tuples of the same index of all source arrays [[1,4,7], [2,5,9], [3,6,9]]
- ///
- /// one or multiple sequences to be zipped
- ///
- public static IEnumerable