| @@ -9,7 +9,7 @@ | |||
| [](https://996.icu/#/en_US) | |||
| [](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab) | |||
| *The master branch is now based on TensorFlow 2.1; the v0.15-tensorflow1.15 branch is based on TensorFlow 1.15.* | |||
| *The master branch is now based on TensorFlow 2.2; the v0.15-tensorflow1.15 branch is based on TensorFlow 1.15.* | |||
| TF.NET is a member project of [SciSharp STACK](https://github.com/SciSharp). | |||
| @@ -28,7 +28,7 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr | |||
| ### How to use | |||
| | TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.0 | | |||
| | TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.2 | | |||
| | ----------- | ------- | ------- | ------- | ------ | | |||
| | tf.net 0.20 | | | x | x | | |||
| | tf.net 0.15 | | x | x | | | |||
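| To get started, add the `TensorFlow.NET` binding package plus a matching `SciSharp.TensorFlow.Redist` native package to your project (as the console project in this changeset does). The snippet below is a minimal, hedged sketch built only from calls that appear elsewhere in this changeset (`tf.constant`, tensor arithmetic, and the `print` helper from `Tensorflow.Binding`); the exact API surface may differ between the tf 1.x and tf 2.x bindings, and the `QuickStart` class name is illustrative. | |||
| ```csharp | |||
| using static Tensorflow.Binding; | |||
| class QuickStart | |||
| { | |||
|     static void Main() | |||
|     { | |||
|         // Create two scalar tensors and add them; with the 2.x binding this runs eagerly. | |||
|         var x = tf.constant(3112.0f); | |||
|         var y = tf.constant(3112.0f); | |||
|         var z = x + y; | |||
|         print(z); | |||
|     } | |||
| } | |||
| ``` | |||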
| @@ -13,98 +13,168 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\Ten | |||
| EndProject | |||
| Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}" | |||
| EndProject | |||
| Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowNET.Console", "src\TensorFlowNET.Console\TensorFlowNET.Console.csproj", "{03F06299-3F4B-4449-A709-3A647657BC0C}" | |||
| EndProject | |||
| Global | |||
| GlobalSection(SolutionConfigurationPlatforms) = preSolution | |||
| Debug|Any CPU = Debug|Any CPU | |||
| Debug|x64 = Debug|x64 | |||
| Debug|x86 = Debug|x86 | |||
| Debug-Minimal|Any CPU = Debug-Minimal|Any CPU | |||
| Debug-Minimal|x64 = Debug-Minimal|x64 | |||
| Debug-Minimal|x86 = Debug-Minimal|x86 | |||
| Publish|Any CPU = Publish|Any CPU | |||
| Publish|x64 = Publish|x64 | |||
| Publish|x86 = Publish|x86 | |||
| Release|Any CPU = Release|Any CPU | |||
| Release|x64 = Release|x64 | |||
| Release|x86 = Release|x86 | |||
| EndGlobalSection | |||
| GlobalSection(ProjectConfigurationPlatforms) = postSolution | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x86.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|x64 | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x86.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x86.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|x64 | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x86.Build.0 = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.ActiveCfg = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.Build.0 = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.ActiveCfg = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.Build.0 = Release|Any CPU | |||
| EndGlobalSection | |||
| GlobalSection(SolutionProperties) = preSolution | |||
| HideSolutionNode = FALSE | |||
| @@ -72,5 +72,5 @@ Hello, TensorFlow! | |||
| Press any key to continue . . . | |||
| ``` | |||
| This sample code can be found [here](https://github.com/SciSharp/TensorFlow.NET/blob/master/test/TensorFlowNET.Examples/HelloWorld.cs). | |||
| This sample code can be found [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs). | |||
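| For orientation, here is a minimal sketch of what such a program can look like (hedged: the linked example may use a different style, e.g. building a graph and running it in a `Session`; the `HelloWorld` class name and string-constant usage are illustrative). | |||
| ```csharp | |||
| using static Tensorflow.Binding; | |||
| class HelloWorld | |||
| { | |||
|     static void Main() | |||
|     { | |||
|         // Build a string constant; print writes the tensor's value to the console. | |||
|         var hello = tf.constant("Hello, TensorFlow!"); | |||
|         print(hello); | |||
|     } | |||
| } | |||
| ``` | |||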
| @@ -0,0 +1,51 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Diagnostics; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public class MemoryMonitor | |||
| { | |||
| public void WarmUp() | |||
| { | |||
| print(tf.VERSION); | |||
| } | |||
| public void Execute(int epoch, int iterate, Action<int> process) | |||
| { | |||
| /*GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| GC.Collect();*/ | |||
| print($"{process.Method.Name} started..."); | |||
| for (int i = 0; i < epoch; i++) | |||
| { | |||
| var initialMemory = Process.GetCurrentProcess().PrivateMemorySize64;// GC.GetTotalMemory(true); | |||
| process(iterate); | |||
| var finalMemory = Process.GetCurrentProcess().PrivateMemorySize64; //GC.GetTotalMemory(true); | |||
| print($"Epoch {i}: {Format(finalMemory - initialMemory)}."); | |||
| } | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| GC.Collect(); | |||
| print($"Total {process.Method.Name} usage {Format(Process.GetCurrentProcess().PrivateMemorySize64)}"); | |||
| } | |||
| private string Format(long usage) | |||
| { | |||
| if (usage < 0) | |||
| return $"-{Format(0 - usage)}"; | |||
| if (usage <= 1024 && usage >= 0) | |||
| return $"{usage} Bytes"; | |||
| else if (usage > 1024 && usage <= 1024 * 1024) | |||
| return $"{usage / 1024} KB"; | |||
| else | |||
| return $"{usage / 1024 / 1024} MB"; | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,55 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| class MemoryTestingCases | |||
| { | |||
| /// <summary> | |||
| /// Repeatedly creates a scalar constant tensor so per-iteration allocations can be measured. | |||
| /// </summary> | |||
| public Action<int> Constant | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var tensor = tf.constant(3112.0f); | |||
| } | |||
| }; | |||
| public Action<int> Variable | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var tensor = tf.Variable(3112.0f); | |||
| } | |||
| }; | |||
| public Action<int> MathAdd | |||
| => (iterate) => | |||
| { | |||
| var x = tf.constant(3112.0f); | |||
| var y = tf.constant(3112.0f); | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var z = x + y; | |||
| } | |||
| }; | |||
| public Action<int> Gradient | |||
| => (iterate) => | |||
| { | |||
| for(int i = 0; i< iterate; i++) | |||
| { | |||
| var w = tf.constant(3112.0f); | |||
| using var tape = tf.GradientTape(); | |||
| tape.watch(w); | |||
| var loss = w * w; | |||
| var grad = tape.gradient(loss, w); | |||
| } | |||
| }; | |||
| } | |||
| } | |||
| @@ -0,0 +1,33 @@ | |||
| using System; | |||
| namespace Tensorflow | |||
| { | |||
| class Program | |||
| { | |||
| static void Main(string[] args) | |||
| { | |||
| // booting .NET Core: ~10.5 MB. | |||
| var mm = new MemoryMonitor(); | |||
| // warming up TensorFlow.NET: ~28.5 MB. | |||
| mm.WarmUp(); | |||
| var cases = new MemoryTestingCases(); | |||
| int batchSize = 1000; | |||
| // 1 million float constant tensors: ~58.5 MB. | |||
| mm.Execute(10, 100 * batchSize, cases.Constant); | |||
| // 100K float variables: ~80.5 MB. | |||
| mm.Execute(10, 10 * batchSize, cases.Variable); | |||
| // 1 million math adds: ~36.5 MB. | |||
| mm.Execute(10, 100 * batchSize, cases.MathAdd); | |||
| // 100K gradient computations: ~210 MB. | |||
| mm.Execute(10, 10 * batchSize, cases.Gradient); | |||
| Console.WriteLine("Finished."); | |||
| Console.ReadLine(); | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,18 @@ | |||
| <Project Sdk="Microsoft.NET.Sdk"> | |||
| <PropertyGroup> | |||
| <OutputType>Exe</OutputType> | |||
| <TargetFramework>netcoreapp3.1</TargetFramework> | |||
| <RootNamespace>Tensorflow</RootNamespace> | |||
| <AssemblyName>Tensorflow</AssemblyName> | |||
| </PropertyGroup> | |||
| <ItemGroup> | |||
| <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.2.0.1" /> | |||
| </ItemGroup> | |||
| <ItemGroup> | |||
| <ProjectReference Include="..\TensorFlowNET.Core\Tensorflow.Binding.csproj" /> | |||
| </ItemGroup> | |||
| </Project> | |||
| @@ -43,7 +43,7 @@ namespace Tensorflow | |||
| /// </summary> | |||
| public partial class c_api | |||
| { | |||
| public const string TensorFlowLibName = "tensorflow"; | |||
| public const string TensorFlowLibName = @"D:\SciSharp\tensorflow-google\bazel-bin\tensorflow\tensorflow.dll"; | |||
| public static string StringPiece(IntPtr handle) | |||
| { | |||
| @@ -51,7 +51,7 @@ namespace Tensorflow | |||
| } | |||
| public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args); | |||
| public delegate void DeallocatorV2(IntPtr data, long size, IntPtr args); | |||
| public struct DeallocatorArgs | |||
| { | |||
| internal static unsafe c_api.DeallocatorArgs* EmptyPtr; | |||
| @@ -20,8 +20,8 @@ namespace Tensorflow | |||
| { | |||
| public partial class tensorflow | |||
| { | |||
| public GradientActor GradientTape() | |||
| => new GradientActor(); | |||
| public GradientTape GradientTape() | |||
| => new GradientTape(); | |||
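| // Hedged usage sketch, mirroring the Gradient test case elsewhere in this changeset: | |||
| //   var w = tf.constant(3112.0f); | |||
| //   using var tape = tf.GradientTape(); | |||
| //   tape.watch(w); | |||
| //   var loss = w * w; | |||
| //   var grad = tape.gradient(loss, w); | |||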
| public Tensor[] gradients(Tensor[] ys, | |||
| Tensor[] xs, | |||
| @@ -14,6 +14,7 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Operations; | |||
| namespace Tensorflow | |||
| @@ -259,7 +260,6 @@ namespace Tensorflow | |||
| public Tensor sub<Tx, Ty>(Tx a, Ty b, string name = null) | |||
| => gen_math_ops.sub(a, b, name: name); | |||
| public Tensor divide(Tensor a, Tensor b) | |||
| => a / b; | |||
| @@ -348,6 +348,9 @@ namespace Tensorflow | |||
| public Tensor minimum<T1, T2>(T1 x, T2 y, string name = null) | |||
| => gen_math_ops.minimum(x, y, name: name); | |||
| public Tensor multiply(Tensor x, Tensor y, string name = null) | |||
| => gen_math_ops.mul(x, y, name: name); | |||
| /// <summary> | |||
| /// return x * y | |||
| /// </summary> | |||
| @@ -387,7 +390,7 @@ namespace Tensorflow | |||
| => x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"); | |||
| public Tensor pow<T1, T2>(T1 x, T2 y, string name = "pow") | |||
| => gen_math_ops.pow(x, y, name: name); | |||
| => math_ops.pow(x, y, name: name); | |||
| /// <summary> | |||
| /// Divides `x / y` elementwise, rounding toward the most negative integer. | |||
| @@ -525,5 +528,7 @@ namespace Tensorflow | |||
| public Tensor square(Tensor x, string name = null) | |||
| => gen_math_ops.square(x, name: name); | |||
| public Tensor squared_difference(Tensor x, Tensor y, string name = null) | |||
| => gen_math_ops.squared_difference(x: x, y: y, name: name); | |||
| } | |||
| } | |||
| @@ -116,6 +116,8 @@ namespace Tensorflow | |||
| public IActivation relu() => new relu(); | |||
| public IActivation swish() => new swish(); | |||
| public IActivation tanh() => new tanh(); | |||
| public IActivation softmax() => new softmax(); | |||
| public Tensor tanh(Tensor x, string name = null) | |||
| => gen_nn_ops.tanh(x, name); | |||
| @@ -123,8 +125,8 @@ namespace Tensorflow | |||
| => gen_nn_ops.relu(features, name); | |||
| public Tensor[] fused_batch_norm(Tensor x, | |||
| VariableV1 scale, | |||
| VariableV1 offset, | |||
| IVariableV1 scale, | |||
| IVariableV1 offset, | |||
| Tensor mean = null, | |||
| Tensor variance = null, | |||
| float epsilon = 0.001f, | |||
| @@ -18,22 +18,33 @@ namespace Tensorflow | |||
| { | |||
| public partial class tensorflow | |||
| { | |||
| /// <summary> | |||
| /// Outputs random values from a normal distribution. | |||
| /// </summary> | |||
| /// <param name="shape"></param> | |||
| /// <param name="mean"></param> | |||
| /// <param name="stddev"></param> | |||
| /// <param name="dtype"></param> | |||
| /// <param name="seed"></param> | |||
| /// <param name="name"></param> | |||
| /// <returns></returns> | |||
| public Tensor random_normal(TensorShape shape, | |||
| float mean = 0.0f, | |||
| float stddev = 1.0f, | |||
| TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
| int? seed = null, | |||
| string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name); | |||
| public Random random => new Random(); | |||
| public class Random | |||
| { | |||
| /// <summary> | |||
| /// Outputs random values from a normal distribution. | |||
| /// </summary> | |||
| /// <param name="shape"></param> | |||
| /// <param name="mean"></param> | |||
| /// <param name="stddev"></param> | |||
| /// <param name="dtype"></param> | |||
| /// <param name="seed"></param> | |||
| /// <param name="name"></param> | |||
| /// <returns></returns> | |||
| public Tensor normal(TensorShape shape, | |||
| float mean = 0.0f, | |||
| float stddev = 1.0f, | |||
| TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
| int? seed = null, | |||
| string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name); | |||
| public Tensor categorical( | |||
| Tensor logits, | |||
| int num_samples, | |||
| int? seed = null, | |||
| string name = null, | |||
| TF_DataType output_dtype = TF_DataType.DtInvalid) => random_ops.multinomial(logits, num_samples, seed: seed, name: name, output_dtype: output_dtype); | |||
| } | |||
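| // Hedged usage sketch of the nested random API (argument values are illustrative): | |||
| //   var samples = tf.random.normal(new TensorShape(2, 3), mean: 0.0f, stddev: 1.0f); | |||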
| public Tensor random_uniform(TensorShape shape, | |||
| float minval = 0, | |||
| @@ -38,8 +38,8 @@ namespace Tensorflow | |||
| public Optimizer GradientDescentOptimizer(Tensor learning_rate) | |||
| => new GradientDescentOptimizer(learning_rate); | |||
| public Optimizer AdamOptimizer(float learning_rate, string name = "Adam") | |||
| => new AdamOptimizer(learning_rate, name: name); | |||
| public Optimizer AdamOptimizer(float learning_rate, float epsilon = 1e-8f, string name = "Adam") | |||
| => new AdamOptimizer(learning_rate, epsilon:epsilon, name: name); | |||
| public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam") | |||
| => new AdamOptimizer(learning_rate, name: name, dtype: dtype); | |||
| @@ -50,7 +50,7 @@ namespace Tensorflow | |||
| public ExponentialMovingAverage ExponentialMovingAverage(float decay) | |||
| => new ExponentialMovingAverage(decay); | |||
| public Saver Saver(VariableV1[] var_list = null, int max_to_keep = 5) | |||
| public Saver Saver(IVariableV1[] var_list = null, int max_to_keep = 5) | |||
| => new Saver(var_list: var_list, max_to_keep: max_to_keep); | |||
| public string write_graph(Graph graph, string logdir, string name, bool as_text = true) | |||
| @@ -68,7 +68,7 @@ namespace Tensorflow | |||
| clear_devices, | |||
| import_scope).Item1; | |||
| public (MetaGraphDef, Dictionary<string, VariableV1>) export_meta_graph(string filename = "", | |||
| public (MetaGraphDef, Dictionary<string, IVariableV1>) export_meta_graph(string filename = "", | |||
| bool as_text = false, | |||
| bool clear_devices = false, | |||
| bool clear_extraneous_savers = false, | |||
| @@ -21,9 +21,9 @@ namespace Tensorflow | |||
| { | |||
| public partial class tensorflow | |||
| { | |||
| public VariableV1[] global_variables(string scope = null) | |||
| public IVariableV1[] global_variables(string scope = null) | |||
| { | |||
| return (ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) as List<VariableV1>) | |||
| return (ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) as List<IVariableV1>) | |||
| .ToArray(); | |||
| } | |||
| @@ -33,7 +33,7 @@ namespace Tensorflow | |||
| /// <param name="var_list">List of `Variable` objects to initialize.</param> | |||
| /// <param name="name">Optional name for the returned operation.</param> | |||
| /// <returns>An Op that run the initializers of all the specified variables.</returns> | |||
| public Operation variables_initializer(VariableV1[] var_list, string name = "init") | |||
| public Operation variables_initializer(IVariableV1[] var_list, string name = "init") | |||
| => variables.variables_initializer(var_list, name: name); | |||
| public Operation global_variables_initializer() | |||
| @@ -47,8 +47,8 @@ namespace Tensorflow | |||
| /// </summary> | |||
| /// <param name="scope"></param> | |||
| /// <returns></returns> | |||
| public VariableV1[] trainable_variables(string scope = null) | |||
| => (variables.trainable_variables() as List<VariableV1>).ToArray(); | |||
| public IVariableV1[] trainable_variables(string scope = null) | |||
| => (variables.trainable_variables() as List<IVariableV1>).ToArray(); | |||
| public RefVariable get_variable(string name, | |||
| TensorShape shape = null, | |||
| @@ -195,6 +195,17 @@ namespace Tensorflow | |||
| return (float)(DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalSeconds; | |||
| } | |||
| public static IEnumerable<(T1, T2)> zip<T1, T2>((T1, T1) t1, (T2, T2) t2) | |||
| { | |||
| for (int i = 0; i < 2; i++) | |||
| { | |||
| if (i == 0) | |||
| yield return (t1.Item1, t2.Item1); | |||
| else | |||
| yield return (t1.Item2, t2.Item2); | |||
| } | |||
| } | |||
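| // Example: zip((1, 2), ("a", "b")) yields (1, "a") followed by (2, "b"). | |||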
| public static IEnumerable<(T, T)> zip<T>(NDArray t1, NDArray t2) | |||
| where T : unmanaged | |||
| { | |||
| @@ -0,0 +1,52 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| public class EagerOperation : Operation | |||
| { | |||
| public int NumInputs; | |||
| public IntPtr[] InputHandles { get; set; } | |||
| public Tensor[] Inputs { get; set; } | |||
| public int NumOutputs; | |||
| public IntPtr[] OutputHandles { get; set; } | |||
| public Tensor[] Outputs { get; set; } | |||
| public int[] SkipInputIndices { get; set; } | |||
| public EagerOperation() : base(IntPtr.Zero) { } | |||
| public override InputList inputs | |||
| { | |||
| get | |||
| { | |||
| if (_inputs_val == null) | |||
| { | |||
| var retval = new Tensor[NumInputs]; | |||
| for (int i = 0; i < NumInputs; i++) | |||
| { | |||
| } | |||
| _inputs_val = new InputList(Inputs); | |||
| } | |||
| return _inputs_val; | |||
| } | |||
| } | |||
| public override Tensor[] outputs | |||
| { | |||
| get | |||
| { | |||
| if (_outputs == null) | |||
| { | |||
| _outputs = Outputs; | |||
| } | |||
| return _outputs; | |||
| } | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,73 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| public partial class EagerTensor : Tensor | |||
| { | |||
| public EagerTensor() : base(IntPtr.Zero) | |||
| { | |||
| EagerTensorHandle = c_api.TFE_NewEagerTensor(); | |||
| } | |||
| public EagerTensor(IntPtr handle) : base(IntPtr.Zero) | |||
| { | |||
| EagerTensorHandle = handle; | |||
| Resolve(); | |||
| } | |||
| public EagerTensor(string value, string device_name) : base(value) | |||
| { | |||
| EagerTensorHandle = c_api.TFE_NewEagerTensor(); | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle); | |||
| Resolve(); | |||
| } | |||
| public EagerTensor(NDArray value, string device_name) : base(value) | |||
| { | |||
| EagerTensorHandle = c_api.TFE_NewEagerTensor(); | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle); | |||
| Resolve(); | |||
| } | |||
| public EagerTensor Resolve() | |||
| { | |||
| _id = c_api.TFE_EagerTensorId(EagerTensorHandle); | |||
| if (tfe_tensor_handle == IntPtr.Zero) | |||
| tfe_tensor_handle = c_api.TFE_EagerTensorHandle(EagerTensorHandle); | |||
| if (_handle == IntPtr.Zero) | |||
| _handle = c_api.TFE_TensorHandleResolve(tfe_tensor_handle, status.Handle); | |||
| /*print($"new Tensor {Id} {_handle.ToString("x16")}"); | |||
| print($"new TensorHandle {Id} {tfe_tensor_handle.ToString("x16")}"); | |||
| print($"new EagerTensor {Id} {EagerTensorHandle.ToString("x16")}");*/ | |||
| GarbageCollector.Increase(_handle, GCItemType.TensorHandle); | |||
| GarbageCollector.Increase(tfe_tensor_handle, GCItemType.LocalTensorHandle); | |||
| GarbageCollector.Increase(EagerTensorHandle, GCItemType.EagerTensorHandle); | |||
| return this; | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| GarbageCollector.Decrease(_handle); | |||
| GarbageCollector.Decrease(tfe_tensor_handle); | |||
| GarbageCollector.Decrease(EagerTensorHandle); | |||
| /*print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}"); | |||
| c_api.TF_DeleteTensor(_handle); | |||
| print($"deleting DeleteTensorHandle {Id} {tfe_tensor_handle.ToString("x16")}"); | |||
| c_api.TFE_DeleteTensorHandle(tfe_tensor_handle); | |||
| print($"deleting DeleteEagerTensor {Id} {EagerTensorHandle.ToString("x16")}"); | |||
| c_api.TFE_DeleteEagerTensor(EagerTensorHandle);*/ | |||
| } | |||
| } | |||
| } | |||
| @@ -8,7 +8,7 @@ namespace Tensorflow.Eager | |||
| { | |||
| public partial class EagerTensor | |||
| { | |||
| public static explicit operator TFE_TensorHandle(EagerTensor tensor) | |||
| => tensor.tfe_tensor_handle; | |||
| public static implicit operator IntPtr(EagerTensor tensor) | |||
| => tensor.EagerTensorHandle; | |||
| } | |||
| } | |||
| @@ -2,42 +2,34 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| public partial class EagerTensor : Tensor | |||
| { | |||
| Status status = new Status(); | |||
| TFE_TensorHandle tfe_tensor_handle; | |||
| public EagerTensor(IntPtr handle) : base(handle) | |||
| { | |||
| tfe_tensor_handle = handle; | |||
| _handle = c_api.TFE_TensorHandleResolve(handle, status.Handle); | |||
| } | |||
| IntPtr tfe_tensor_handle; | |||
| public IntPtr EagerTensorHandle { get; set; } | |||
| public override string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(tfe_tensor_handle, status.Handle)); | |||
| public EagerTensor(string value, string device_name) : base(value) | |||
| { | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| } | |||
| // public override int rank => c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status); | |||
| public EagerTensor(int value, string device_name) : base(value) | |||
| public static int GetRank(IntPtr handle) | |||
| { | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | |||
| using var status = new Status(); | |||
| return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status.Handle); | |||
| } | |||
| public EagerTensor(float[] value, string device_name) : base(value) | |||
| public static int[] GetDims(IntPtr handle) | |||
| { | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| } | |||
| public EagerTensor(double[] value, string device_name) : base(value) | |||
| { | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| } | |||
| public EagerTensor(NDArray value, string device_name) : base(value) | |||
| { | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | |||
| using var status = new Status(); | |||
| var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status.Handle)]; | |||
| for (int i = 0; i < dims.Length; i++) | |||
| dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, status.Handle); | |||
| return dims; | |||
| } | |||
| public override string ToString() | |||
| @@ -45,23 +37,27 @@ namespace Tensorflow.Eager | |||
| switch (rank) | |||
| { | |||
| case -1: | |||
| return $"tf.Tensor: shape=<unknown>, dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}"; | |||
| return $"tf.Tensor: shape=<unknown>, dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}"; | |||
| case 0: | |||
| return $"tf.Tensor: shape=(), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}"; | |||
| return $"tf.Tensor: shape=(), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}"; | |||
| default: | |||
| return $"tf.Tensor: shape=({string.Join(",", shape)}), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}"; | |||
| return $"tf.Tensor: shape=({string.Join(",", shape)}), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}"; | |||
| } | |||
| } | |||
| private string GetFormattedString() | |||
| public static string GetFormattedString(TF_DataType dtype, NDArray nd) | |||
| { | |||
| var nd = numpy(); | |||
| if (nd.size == 0) | |||
| return "[]"; | |||
| switch (dtype) | |||
| { | |||
| case TF_DataType.TF_STRING: | |||
| return $"b'{(string)nd}'"; | |||
| case TF_DataType.TF_BOOL: | |||
| return (nd.GetByte(0) > 0).ToString(); | |||
| case TF_DataType.TF_RESOURCE: | |||
| return "<unprintable>"; | |||
| default: | |||
| return nd.ToString(); | |||
| } | |||
| @@ -27,18 +27,26 @@ namespace Tensorflow.Eager | |||
| /// <param name="ctx">The value of context.context().</param> | |||
| /// <param name="name">Customized name for the operation.</param> | |||
| /// <returns>List of output Tensor objects. The list is empty if there are no outputs</returns> | |||
| public Tensor execute(Context ctx, string op_name, Tensor[] inputs, object[] attrs, string name = null) | |||
| public EagerTensor[] execute(Context ctx, string op_name, int num_outputs, | |||
| EagerTensor[] inputs, object[] attrs, | |||
| string name = null) | |||
| { | |||
| ctx.ensure_initialized(); | |||
| using (var status = new Status()) | |||
| { | |||
| var retVals = wrap_tfe_src.TFE_Execute(ctx, ctx.device_name, op_name, inputs, attrs, 1, status); | |||
| return new EagerTensor(retVals[0]); | |||
| } | |||
| var results = Enumerable.Range(0, num_outputs).Select(x => new EagerTensor()).ToArray(); | |||
| using Status status = new Status(c_api.TFE_QuickExecute(ctx, | |||
| ctx.device_name, | |||
| op_name, | |||
| inputs.Select(x => x.EagerTensorHandle).ToArray(), | |||
| inputs.Length, | |||
| op => wrap_tfe_src.SetOpAttrs(op, attrs), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results.Select(x => x.Resolve()).ToArray(); | |||
| } | |||
| public (TF_DataType, Tensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null) | |||
| public (TF_DataType, EagerTensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null) | |||
| { | |||
| if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid) | |||
| return (default_dtype, null); | |||
| @@ -55,10 +63,10 @@ namespace Tensorflow.Eager | |||
| if (dtype == TF_DataType.DtInvalid) | |||
| { | |||
| var ret = new List<Tensor>(); | |||
| var ret = new List<EagerTensor>(); | |||
| foreach (var t in args) | |||
| { | |||
| ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx)); | |||
| ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as EagerTensor); | |||
| if (dtype == TF_DataType.DtInvalid) | |||
| dtype = ret.Last().dtype; | |||
| } | |||
| @@ -68,10 +76,5 @@ namespace Tensorflow.Eager | |||
| else | |||
| throw new NotImplementedException(""); | |||
| } | |||
| public void record_gradient(string op_name, InputList inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null) | |||
| { | |||
| wrap_tfe_src.RecordGradient(op_name, inputs._inputs, attrs, results, name); | |||
| } | |||
| } | |||
| } | |||
| @@ -1,19 +1,15 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| [StructLayout(LayoutKind.Sequential)] | |||
| public struct TFE_TensorHandle | |||
| { | |||
| IntPtr _handle; | |||
| public TFE_TensorHandle(IntPtr handle) | |||
| => _handle = handle; | |||
| public static implicit operator TFE_TensorHandle(IntPtr handle) | |||
| => new TFE_TensorHandle(handle); | |||
| public static implicit operator IntPtr(TFE_TensorHandle tensor) | |||
| => tensor._handle; | |||
| @@ -7,6 +7,46 @@ namespace Tensorflow | |||
| { | |||
| public partial class c_api | |||
| { | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_RegisterGradientFunction(gradient_function_callback gradientFunctionCallback, | |||
| delete_backward_function_callback deleteBackwardFunctionCallback); | |||
| /// <summary> | |||
| /// | |||
| /// </summary> | |||
| /// <param name="op_name"></param> | |||
| /// <param name="op_inputs"></param> | |||
| /// <param name="op_outputs"></param> | |||
| /// <param name="num_attrs"></param> | |||
| /// <param name="output_grads">previous node ouput</param> | |||
| /// <param name="skip_input_indices"></param> | |||
| /// <returns></returns> | |||
| [UnmanagedFunctionPointer(CallingConvention.StdCall)] | |||
| public delegate IntPtr gradient_function_callback(string op_name, | |||
| IntPtr op_inputs, | |||
| IntPtr op_outputs, | |||
| int num_attrs, | |||
| IntPtr output_grads, | |||
| IntPtr skip_input_indices); | |||
| [UnmanagedFunctionPointer(CallingConvention.StdCall)] | |||
| public delegate void delete_backward_function_callback(string op_name, | |||
| IntPtr op_inputs, | |||
| IntPtr op_outputs); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TFE_WrapGradientResult(IntPtr[] gradients, int num_gradients); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr VSpace_Handle(VSpace_callback_Ones ones, VSpace_callback_AggregateGrads aggregate_grads); | |||
| [UnmanagedFunctionPointer(CallingConvention.StdCall)] | |||
| public delegate IntPtr VSpace_callback_Ones(long[] shape, int dims, TF_DataType dtype); | |||
| [UnmanagedFunctionPointer(CallingConvention.StdCall)] | |||
| public delegate IntPtr VSpace_callback_AggregateGrads(TF_BindingArray gradients); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_RegisterVSpace(IntPtr vspace); | |||
| /// <summary> | |||
| /// Return a new options object. | |||
| /// </summary> | |||
| @@ -102,14 +142,20 @@ namespace Tensorflow | |||
| public static extern TFE_Op TFE_NewOp(IntPtr ctx, string op_or_function_name, SafeStatusHandle status); | |||
| /// <summary> | |||
| /// | |||
| /// Resets `op_to_reset` with `op_or_function_name` and `raw_device_name`. This | |||
| /// is for performance optimization by reusing an existing unused op rather than | |||
| /// creating a new op every time. If `raw_device_name` is `NULL` or empty, it | |||
| /// does not set the device name. If it's not `NULL`, then it attempts to parse | |||
| /// and set the device name. It's effectively `TFE_OpSetDevice`, but it is faster | |||
| /// than separately calling it because if the existing op has the same | |||
| /// `raw_device_name`, it skips parsing and just leaves it as is. | |||
| /// </summary> | |||
| /// <param name="ctx">TFE_Context*</param> | |||
| /// <param name="op_to_reset">TFE_Op*</param> | |||
| /// <param name="op_or_function_name">const char*</param> | |||
| /// <param name="raw_device_name">const char*</param> | |||
| /// <param name="status">TF_Status*</param> | |||
| /// <param name="op_to_reset">TFE_Op*</param> | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_OpReset(IntPtr ctx, string op_or_function_name, SafeStatusHandle status, IntPtr op_to_reset); | |||
| public static extern void TFE_OpReset(IntPtr op_to_reset, string op_or_function_name, string raw_device_name, SafeStatusHandle status); | |||
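| // Hedged usage sketch: reset and reuse a previously created op handle instead of allocating a new one, e.g. | |||
| //   c_api.TFE_OpReset(cachedOpHandle, "Add", "", status.Handle);  // an empty device name leaves the device unset | |||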
| /// <summary> | |||
| /// | |||
| @@ -180,6 +226,18 @@ namespace Tensorflow | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern TFE_TensorHandle TFE_NewTensorHandle(IntPtr t, SafeStatusHandle status); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TFE_EagerTensorHandle(IntPtr t); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern int TFE_EagerTensorId(IntPtr t); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TFE_NewEagerTensor(); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_SetEagerTensorHandle(IntPtr tensor, IntPtr handle); | |||
| /// <summary> | |||
| /// Sets the default execution mode (sync/async). Note that this can be | |||
| /// overridden per thread using TFE_ContextSetExecutorForThread. | |||
| @@ -206,7 +264,8 @@ namespace Tensorflow | |||
| /// <param name="status">TF_Status*</param> | |||
| /// <returns></returns> | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern TF_Tensor TFE_TensorHandleResolve(IntPtr h, SafeStatusHandle status); | |||
| public static extern IntPtr TFE_TensorHandleResolve(IntPtr h, SafeStatusHandle status); | |||
| /// <summary> | |||
| /// This function will block till the operation that produces `h` has completed. | |||
| @@ -217,6 +276,9 @@ namespace Tensorflow | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern int TFE_TensorHandleNumDims(IntPtr h, SafeStatusHandle status); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern int TFE_TensorHandleDim(IntPtr h, int dim, SafeStatusHandle status); | |||
| /// <summary> | |||
| /// Returns the device of the operation that produced `h`. If `h` was produced by | |||
| /// a copy, returns the destination device of the copy. Note that the returned | |||
| @@ -255,6 +317,19 @@ namespace Tensorflow | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_DeleteTensorHandle(IntPtr h); | |||
| /// <summary> | |||
| /// | |||
| /// </summary> | |||
| /// <param name="h">TFE_TensorHandle*</param> | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_DeleteEagerTensor(IntPtr h); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TF_DeleteBindingArray(IntPtr h); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_DeleteBindingTensorArray(IntPtr h); | |||
| /// <summary> | |||
| /// Creates a new eager Executor. Nodes in one executor are guaranteed to be | |||
| /// executed in sequence. Assigning nodes to different executors allows executing | |||
| @@ -304,5 +379,64 @@ namespace Tensorflow | |||
| /// <returns>TFE_Executor*</returns> | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern TFE_Executor TFE_ContextGetExecutorForThread(IntPtr ctx); | |||
| /// <summary> | |||
| /// | |||
| /// </summary> | |||
| /// <param name="ctx"></param> | |||
| /// <param name="device_name"></param> | |||
| /// <param name="op_name"></param> | |||
| /// <param name="name"></param> | |||
| /// <param name="args"></param> | |||
| /// <param name="input_size"></param> | |||
| /// <param name="set_op_attrs"></param> | |||
| /// <param name="status"></param> | |||
| /// <returns>EagerTensorHandle</returns> | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern SafeStatusHandle TFE_FastPathExecute(IntPtr ctx, | |||
| string device_name, | |||
| string op_name, | |||
| string name, | |||
| IntPtr[] inputs, | |||
| int input_size, | |||
| TFE_FastPathExecute_SetOpAttrs set_op_attrs, | |||
| IntPtr[] outputs, | |||
| int output_size); | |||
| [UnmanagedFunctionPointer(CallingConvention.StdCall)] | |||
| public delegate void TFE_FastPathExecute_SetOpAttrs(IntPtr op); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern SafeStatusHandle TFE_QuickExecute(IntPtr ctx, | |||
| string device_name, | |||
| string op_name, | |||
| IntPtr[] inputs, | |||
| int input_size, | |||
| TFE_FastPathExecute_SetOpAttrs set_op_attrs, | |||
| IntPtr[] outputs, | |||
| int output_size); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TFE_TapeSetNew(bool persistent, bool watch_accessed_variables); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_TapeSetRemove(IntPtr tape); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_TapeWatch(IntPtr tape, IntPtr variable); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern void TFE_TapeVariableAccessed(IntPtr variable); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TFE_TapeWatchedVariables(IntPtr tape); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr ResourceVariable_Handle(IntPtr variable); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern SafeStatusHandle TFE_TapeGradient(IntPtr tape, | |||
| IntPtr[] target, int target_size, | |||
| IntPtr[] sources, int source_size, | |||
| IntPtr[] outputs, int output_size); | |||
| } | |||
| } | |||
| @@ -1,33 +0,0 @@ | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System; | |||
| using Tensorflow.Gradients; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| /// <summary> | |||
| /// python\eager\pywrap_tfe_src.cc | |||
| /// </summary> | |||
| public partial class wrap_tfe_src | |||
| { | |||
| public static void RecordGradient(string op_name, Tensor[] inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null) | |||
| { | |||
| var input_ids = inputs.Select(x => x.Id).ToArray(); | |||
| var input_dtypes = inputs.Select(x => x.dtype).ToArray(); | |||
| bool should_record = false; | |||
| foreach (var input_dtype in input_dtypes) | |||
| { | |||
| if (Tape.IsDtypeTrainable(input_dtype.as_datatype_enum())) | |||
| { | |||
| should_record = true; | |||
| break; | |||
| } | |||
| } | |||
| if (!should_record) return; | |||
| var op_outputs = results; | |||
| var op_inputs = inputs; | |||
| } | |||
| } | |||
| } | |||
| @@ -1,62 +0,0 @@ | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System; | |||
| using static Tensorflow.OpDef.Types; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| /// <summary> | |||
| /// python\eager\pywrap_tfe_src.cc | |||
| /// </summary> | |||
| public partial class wrap_tfe_src | |||
| { | |||
| public static IntPtr[] TFE_Execute(Context ctx, | |||
| string device_name, | |||
| string op_name, | |||
| Tensor[] inputs, | |||
| object[] attrs, | |||
| int num_outputs, | |||
| Status status) | |||
| => TFE_ExecuteCancelable(ctx, device_name, op_name, inputs, attrs, num_outputs, status); | |||
| public static IntPtr[] TFE_ExecuteCancelable(Context ctx, | |||
| string device_name, | |||
| string op_name, | |||
| Tensor[] inputs, | |||
| object[] attrs, | |||
| int num_outputs, | |||
| Status status) | |||
| { | |||
| var op = GetOp(ctx, op_name, status); | |||
| status.Check(true); | |||
| c_api.TFE_OpSetDevice(op, device_name, status.Handle); | |||
| if(status.ok()) | |||
| { | |||
| for (int i = 0; i < inputs.Length; ++i) | |||
| { | |||
| TFE_TensorHandle tensor_handle; | |||
| switch (inputs[i]) | |||
| { | |||
| case EagerTensor et: | |||
| tensor_handle = (TFE_TensorHandle)et; | |||
| break; | |||
| default: | |||
| tensor_handle = c_api.TFE_NewTensorHandle(inputs[i], status.Handle); | |||
| break; | |||
| } | |||
| c_api.TFE_OpAddInput(op, tensor_handle, status.Handle); | |||
| } | |||
| } | |||
| if (status.ok()) | |||
| SetOpAttrs(ctx, op, attrs, 0, status); | |||
| var outputs = new IntPtr[num_outputs]; | |||
| if (status.ok()) | |||
| { | |||
| c_api.TFE_Execute(op, outputs, ref num_outputs, status.Handle); | |||
| status.Check(true); | |||
| } | |||
| return outputs; | |||
| } | |||
| } | |||
| } | |||
| @@ -2,6 +2,7 @@ | |||
| using System.Linq; | |||
| using System; | |||
| using static Tensorflow.OpDef.Types; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| @@ -10,183 +11,23 @@ namespace Tensorflow.Eager | |||
| /// </summary> | |||
| public partial class wrap_tfe_src | |||
| { | |||
| static int kFastPathExecuteInputStartIndex = 0; | |||
| public static EagerTensor TFE_FastPathExecute(Context ctx, | |||
| string device_name, | |||
| string opName, | |||
| string name, | |||
| Action callbacks, | |||
| params object[] args) | |||
| { | |||
| int args_size = args.Length; | |||
| var attr_list_sizes = new Dictionary<string, long>(); | |||
| using (var status = new Status()) | |||
| { | |||
| var op = GetOp(ctx, opName, status); | |||
| var op_def = Graph.TFE_GetOpDef(opName); | |||
| // Set non-inferred attrs, including setting defaults if the attr is passed in | |||
| // as None. | |||
| for (int i = kFastPathExecuteInputStartIndex + op_def.InputArg.Count; i < args_size; i += 2) | |||
| { | |||
| var attr_name = args[i].ToString(); | |||
| var attr_value = args[i + 1]; | |||
| foreach(var attr in op_def.Attr) | |||
| { | |||
| if(attr_name == attr.Name) | |||
| { | |||
| SetOpAttrWithDefaults(ctx, op, attr, attr_name, attr_value, attr_list_sizes, status); | |||
| status.Check(true); | |||
| break; | |||
| } | |||
| } | |||
| } | |||
| c_api.TFE_OpSetDevice(op, device_name, status.Handle); | |||
| status.Check(true); | |||
| // Add inferred attrs and inputs. | |||
| for (int i = 0; i < op_def.InputArg.Count; i++) | |||
| { | |||
| var input_arg = op_def.InputArg[i]; | |||
| if (!string.IsNullOrEmpty(input_arg.NumberAttr)) | |||
| { | |||
| int len = (args[kFastPathExecuteInputStartIndex + i] as object[]).Length; | |||
| c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len); | |||
| attr_list_sizes[input_arg.NumberAttr] = len; | |||
| if (len > 0) | |||
| { | |||
| var fast_input_array = (object[])args[i]; | |||
| // First item adds the type attr. | |||
| if (!AddInputToOp(fast_input_array[i], true, input_arg, op, status)) | |||
| return null; | |||
| for (var j = 1; j < len; j++) | |||
| { | |||
| // Since the list is homogeneous, we don't need to re-add the attr. | |||
| if (!AddInputToOp(fast_input_array[j], false, input_arg, op, status)) | |||
| return null; | |||
| } | |||
| } | |||
| } | |||
| else if (!string.IsNullOrEmpty(input_arg.TypeListAttr)) | |||
| { | |||
| } | |||
| else | |||
| { | |||
| // The item is a single item. | |||
| AddInputToOp(args[i], true, input_arg, op, status); | |||
| } | |||
| } | |||
| int num_retvals = 0; | |||
| for (int i = 0; i < op_def.OutputArg.Count; i++) | |||
| { | |||
| var output_arg = op_def.OutputArg[i]; | |||
| var delta = 1L; | |||
| if (!string.IsNullOrEmpty(output_arg.NumberAttr)) | |||
| delta = attr_list_sizes[output_arg.NumberAttr]; | |||
| else if (!string.IsNullOrEmpty(output_arg.TypeListAttr)) | |||
| delta = attr_list_sizes[output_arg.TypeListAttr]; | |||
| if(delta < 0) | |||
| throw new RuntimeError("Attributes suggest that the size of an output list is less than 0"); | |||
| num_retvals += (int)delta; | |||
| } | |||
| var retVals = new IntPtr[num_retvals]; | |||
| c_api.TFE_Execute(op, retVals, ref num_retvals, status.Handle); | |||
| status.Check(true); | |||
| return num_retvals == 0 ? null : new EagerTensor(retVals[0]); | |||
| } | |||
| } | |||
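| // Call sketch for the managed fast path above (illustrative only; the eager call sites later | |||
| // in this change move to c_api.TFE_FastPathExecute directly). Positional inputs come first, | |||
| // followed by alternating attr-name/value pairs, e.g.: | |||
| //   var y = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| //       "Shape", name, null, input, "out_type", out_type); | |||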
| private static TFE_Op GetOp(Context ctx, string op_or_function_name, Status status) | |||
| { | |||
| var maybe_op = ReleaseThreadLocalOp(); | |||
| if (maybe_op != IntPtr.Zero) | |||
| { | |||
| c_api.TFE_OpReset(ctx, op_or_function_name, status.Handle, maybe_op); | |||
| } | |||
| else | |||
| { | |||
| maybe_op = c_api.TFE_NewOp(ctx, op_or_function_name, status.Handle); | |||
| op = maybe_op; | |||
| } | |||
| status.Check(true); | |||
| return maybe_op; | |||
| } | |||
| static TFE_Op op; | |||
| private static TFE_Op ReleaseThreadLocalOp() | |||
| { | |||
| return op; | |||
| } | |||
| /// <summary> | |||
| /// Adds input and type attr to the op, and to the list of flattened | |||
| /// inputs/attrs. | |||
| /// </summary> | |||
| /// <param name="inputs"></param> | |||
| /// <param name="add_type_attr"></param> | |||
| /// <param name="input_arg"></param> | |||
| /// <param name="op"></param> | |||
| /// <param name="status"></param> | |||
| /// <returns></returns> | |||
| private static bool AddInputToOp(object inputs, | |||
| bool add_type_attr, | |||
| ArgDef input_arg, | |||
| IntPtr op, | |||
| Status status) | |||
| { | |||
| TFE_TensorHandle input_handle; | |||
| // ConvertToTensor(); | |||
| switch (inputs) | |||
| { | |||
| case EagerTensor input: | |||
| input_handle = (TFE_TensorHandle)input; | |||
| break; | |||
| case EagerTensor[] input_list: | |||
| input_handle = (TFE_TensorHandle)input_list[0]; | |||
| break; | |||
| default: | |||
| throw new NotImplementedException(""); | |||
| } | |||
| if(add_type_attr && !string.IsNullOrEmpty(input_arg.TypeAttr)) | |||
| { | |||
| var dtype = c_api.TFE_TensorHandleDataType(input_handle); | |||
| c_api.TFE_OpSetAttrType(op, input_arg.TypeAttr, dtype); | |||
| } | |||
| c_api.TFE_OpAddInput(op, input_handle, status.Handle); | |||
| status.Check(true); | |||
| return true; | |||
| } | |||
| private static void SetOpAttrs(Context ctx, TFE_Op op, object[] attrs, int start_index, Status out_status) | |||
| public static void SetOpAttrs(TFE_Op op, params object[] attrs) | |||
| { | |||
| using var status = new Status(); | |||
| var len = attrs.Length; | |||
| for (int i = 0; i < len; i += 2) | |||
| { | |||
| var key = attrs[start_index + i].ToString(); | |||
| var value = attrs[start_index + i + 1]; | |||
| var key = attrs[i].ToString(); | |||
| var value = attrs[i + 1]; | |||
| byte is_list = 0; | |||
| var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, out_status.Handle); | |||
| if (!out_status.ok()) return; | |||
| byte is_list = 0; | |||
| var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle); | |||
| if (!status.ok()) return; | |||
| if (is_list != 0) | |||
| SetOpAttrList(ctx, op, key, value, type, null, out_status); | |||
| SetOpAttrList(tf.context, op, key, value, type, null, status); | |||
| else | |||
| SetOpAttrScalar(ctx, op, key, value, type, null, out_status); | |||
| out_status.Check(true); | |||
| SetOpAttrScalar(tf.context, op, key, value, type, null, status); | |||
| status.Check(true); | |||
| } | |||
| } | |||
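| // Usage sketch (illustrative, not part of this diff): attrs holds alternating name/value pairs, | |||
| // and callers pass SetOpAttrs as the attr callback of the native fast path, e.g.: | |||
| //   op => wrap_tfe_src.SetOpAttrs(op, "axis", axis) | |||
| //   op => wrap_tfe_src.SetOpAttrs(op, "out_type", out_type) | |||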
| @@ -35,7 +35,7 @@ namespace Tensorflow | |||
| return meta_graph_def; | |||
| } | |||
| public static (Dictionary<string, VariableV1>, ITensorOrOperation[]) import_scoped_meta_graph_with_return_elements(MetaGraphDef meta_graph_or_file, | |||
| public static (Dictionary<string, IVariableV1>, ITensorOrOperation[]) import_scoped_meta_graph_with_return_elements(MetaGraphDef meta_graph_or_file, | |||
| bool clear_devices = false, | |||
| string import_scope = "", | |||
| Dictionary<string, Tensor> input_map = null, | |||
| @@ -77,7 +77,7 @@ namespace Tensorflow | |||
| return_elements: return_elements); | |||
| // Restores all the other collections. | |||
| var variable_objects = new Dictionary<ByteString, VariableV1>(); | |||
| var variable_objects = new Dictionary<ByteString, IVariableV1>(); | |||
| foreach (var col in meta_graph_def.CollectionDef.OrderBy(x => x.Key)) | |||
| { | |||
| // Don't add unbound_inputs to the new graph. | |||
| @@ -99,7 +99,7 @@ namespace Tensorflow | |||
| { | |||
| foreach (var value in col.Value.BytesList.Value) | |||
| { | |||
| VariableV1 variable = null; | |||
| IVariableV1 variable = null; | |||
| if (!variable_objects.ContainsKey(value)) | |||
| { | |||
| var proto = VariableDef.Parser.ParseFrom(value); | |||
| @@ -147,10 +147,10 @@ namespace Tensorflow | |||
| } | |||
| } | |||
| var variables = graph.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES, | |||
| var variables = graph.get_collection<IVariableV1>(tf.GraphKeys.GLOBAL_VARIABLES, | |||
| scope: scope_to_prepend_to_names); | |||
| var var_list = new Dictionary<string, VariableV1>(); | |||
| variables.ForEach(v => var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v); | |||
| var var_list = new Dictionary<string, IVariableV1>(); | |||
| variables.ForEach(v => var_list[ops.strip_name_scope(v.Name, scope_to_prepend_to_names)] = v); | |||
| return (var_list, imported_return_elements); | |||
| } | |||
| @@ -168,7 +168,7 @@ namespace Tensorflow | |||
| /// <param name="strip_default_attrs"></param> | |||
| /// <param name="meta_info_def"></param> | |||
| /// <returns></returns> | |||
| public static (MetaGraphDef, Dictionary<string, VariableV1>) export_scoped_meta_graph(string filename = "", | |||
| public static (MetaGraphDef, Dictionary<string, IVariableV1>) export_scoped_meta_graph(string filename = "", | |||
| GraphDef graph_def = null, | |||
| bool as_text = false, | |||
| string unbound_inputs_col_name = "unbound_inputs", | |||
| @@ -180,14 +180,14 @@ namespace Tensorflow | |||
| { | |||
| var graph = ops.get_default_graph(); | |||
| var var_list = new Dictionary<string, VariableV1>(); | |||
| var variables = graph.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES); | |||
| var var_list = new Dictionary<string, IVariableV1>(); | |||
| var variables = graph.get_collection<IVariableV1>(tf.GraphKeys.GLOBAL_VARIABLES); | |||
| if (variables != null) | |||
| { | |||
| foreach (var v in variables) | |||
| { | |||
| var_list[v.name] = v; | |||
| var_list[v.Name] = v; | |||
| } | |||
| } | |||
| @@ -268,7 +268,7 @@ namespace Tensorflow | |||
| switch (graph.get_collection(key)) | |||
| { | |||
| case List<VariableV1> collection_list: | |||
| case List<IVariableV1> collection_list: | |||
| col_def.BytesList = new Types.BytesList(); | |||
| foreach (var x in collection_list) | |||
| { | |||
| @@ -1,63 +0,0 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Gradients | |||
| { | |||
| /// <summary> | |||
| /// Record operations for automatic differentiation. | |||
| /// | |||
| /// Operations are recorded if they are executed within this context manager and | |||
| /// at least one of their inputs is being "watched". | |||
| /// | |||
| /// Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`, | |||
| /// where `trainable=True` is default in both cases) are automatically watched. | |||
| /// Tensors can be manually watched by invoking the `watch` method on this context | |||
| /// manager. | |||
| /// </summary> | |||
| public class GradientActor : IDisposable | |||
| { | |||
| bool _recording; | |||
| bool _persistent; | |||
| bool _watch_accessed_variables; | |||
| bool _created_eagerly; | |||
| Tape _tape; | |||
| int tape_nesting_id_counter = 0; | |||
| public GradientActor(bool persistent = false, | |||
| bool watch_accessed_variables = true) | |||
| { | |||
| _persistent = persistent; | |||
| _watch_accessed_variables = watch_accessed_variables; | |||
| _created_eagerly = tf.context.executing_eagerly(); | |||
| _push_tape(); | |||
| } | |||
| private void _push_tape() | |||
| { | |||
| if (_recording) | |||
| throw new ValueError("Tape is still recording, This can happen if you try to " + | |||
| "re-enter an already-active tape."); | |||
| if (_tape == null) | |||
| { | |||
| _tape = new Tape(); | |||
| _tape.tape = new GradientTape(_persistent, _watch_accessed_variables); | |||
| _tape.nesting_id = tape_nesting_id_counter++; | |||
| } | |||
| _recording = true; | |||
| } | |||
| public void watch(Tensor x) | |||
| { | |||
| } | |||
| public void Dispose() | |||
| { | |||
| } | |||
| } | |||
| } | |||
| @@ -1,6 +1,10 @@ | |||
| using System; | |||
| using Google.Protobuf.WellKnownTypes; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Gradients | |||
| @@ -16,16 +20,109 @@ namespace Tensorflow.Gradients | |||
| /// Tensors can be manually watched by invoking the `watch` method on this context | |||
| /// manager. | |||
| /// </summary> | |||
| public class GradientTape | |||
| public class GradientTape : IDisposable | |||
| { | |||
| static bool _recording; | |||
| public static bool Recording => _recording; | |||
| bool _persistent; | |||
| bool _watch_accessed_variables; | |||
| ResourceVariable[] _watched_variables; | |||
| bool _created_eagerly; | |||
| Tape _tape; | |||
| public GradientTape(bool persistent = false, | |||
| bool watch_accessed_variables = true) | |||
| { | |||
| _persistent = persistent; | |||
| _watch_accessed_variables = watch_accessed_variables; | |||
| _created_eagerly = tf.context.executing_eagerly(); | |||
| _push_tape(); | |||
| } | |||
| private void _push_tape() | |||
| { | |||
| if (_recording) | |||
| throw new ValueError("Tape is still recording, This can happen if you try to " + | |||
| "re-enter an already-active tape."); | |||
| if (_tape == null) | |||
| _tape = new Tape(_persistent, _watch_accessed_variables); | |||
| else | |||
| throw new NotImplementedException(""); | |||
| _recording = true; | |||
| } | |||
| private void _pop_tape() | |||
| { | |||
| if (!_recording) | |||
| throw new ValueError("Tape is not recording."); | |||
| _tape.pop_tape(_tape); | |||
| _recording = false; | |||
| } | |||
| /// <summary> | |||
| /// Marks this tensor to be watched by the given tape. | |||
| /// </summary> | |||
| /// <param name="x"></param> | |||
| public void watch(Tensor x) | |||
| { | |||
| _tape.watch(x as EagerTensor); | |||
| } | |||
| public Tensor gradient(Tensor target, Tensor source) | |||
| { | |||
| if(_recording) | |||
| { | |||
| if (!_persistent) | |||
| _pop_tape(); | |||
| } | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_TapeGradient(_tape, | |||
| new [] { (target as EagerTensor).EagerTensorHandle }, 1, | |||
| new [] { (source as EagerTensor).EagerTensorHandle }, 1, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| public unsafe (Tensor, Tensor) gradient(Tensor target, (ResourceVariable, ResourceVariable) sources) | |||
| { | |||
| if (_recording) | |||
| { | |||
| if (!_persistent) | |||
| _pop_tape(); | |||
| } | |||
| var results = new[] { new EagerTensor(), new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_TapeGradient(_tape, | |||
| new IntPtr[] | |||
| { | |||
| target as EagerTensor | |||
| }, 1, | |||
| new IntPtr[] | |||
| { | |||
| (sources.Item1.Handle as EagerTensor).EagerTensorHandle, | |||
| (sources.Item2.Handle as EagerTensor).EagerTensorHandle | |||
| }, 2, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| if (!_persistent) | |||
| { | |||
| // Keep track of watched variables before setting the tape to null | |||
| _watched_variables = _tape.watched_variables(); | |||
| _tape = null; | |||
| } | |||
| return (results[0].Resolve(), results[1].Resolve()); | |||
| } | |||
| public void Dispose() | |||
| { | |||
| if (_recording) | |||
| _pop_tape(); | |||
| } | |||
| } | |||
| } | |||
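| // Illustrative usage sketch for the eager GradientTape above (not part of this change); | |||
| // assumes eager mode is active and `x` is an EagerTensor: | |||
| //   using var tape = new GradientTape(); | |||
| //   tape.watch(x); | |||
| //   var y = gen_math_ops.mul(x, x); | |||
| //   var dy_dx = tape.gradient(y, x); | |||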
| @@ -0,0 +1,30 @@ | |||
| /***************************************************************************** | |||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| namespace Tensorflow.Gradients | |||
| { | |||
| public class RegisterGradientEager : Attribute | |||
| { | |||
| public string Name { get; set; } | |||
| public RegisterGradientEager(string name) | |||
| { | |||
| Name = name; | |||
| } | |||
| } | |||
| } | |||
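| // Example of a gradient function carrying this attribute (see math_grad_eager in this change): | |||
| //   [RegisterGradientEager("Mul")] | |||
| //   public static EagerTensor[] _MulGrad(EagerOperation op, IntPtr[] grads) { ... } | |||
| // ops.RegisterFromAssemblyEager() discovers such methods via reflection. | |||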
| @@ -1,14 +1,48 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| namespace Tensorflow.Gradients | |||
| { | |||
| public class Tape | |||
| public class Tape : DisposableObject | |||
| { | |||
| public GradientTape tape { get; set; } | |||
| public int nesting_id { get; set; } | |||
| public Tape(bool persistent, bool watch_accessed_variables) | |||
| { | |||
| _handle = c_api.TFE_TapeSetNew(persistent, watch_accessed_variables); | |||
| } | |||
| public void watch(EagerTensor x) | |||
| { | |||
| c_api.TFE_TapeWatch(_handle, x.EagerTensorHandle); | |||
| } | |||
| public void pop_tape(Tape tape) | |||
| { | |||
| c_api.TFE_TapeSetRemove(tape); | |||
| } | |||
| public static void variable_accessed(ResourceVariable variable) | |||
| { | |||
| c_api.TFE_TapeVariableAccessed(variable); | |||
| } | |||
| public unsafe ResourceVariable[] watched_variables() | |||
| { | |||
| BindingArray result = c_api.TFE_TapeWatchedVariables(_handle); | |||
| var variables = result.Data.Select(x => | |||
| { | |||
| var tensor = c_api.ResourceVariable_Handle(x); | |||
| return new ResourceVariable(x, tensor); | |||
| }).ToArray(); | |||
| return variables; | |||
| } | |||
| public static bool IsDtypeTrainable(DataType dtype) | |||
| { | |||
| switch (dtype) | |||
| @@ -26,5 +60,12 @@ namespace Tensorflow.Gradients | |||
| return false; | |||
| } | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| } | |||
| public static implicit operator IntPtr(Tape tape) | |||
| => tape._handle; | |||
| } | |||
| } | |||
| @@ -191,7 +191,7 @@ namespace Tensorflow.Gradients | |||
| grad_ctxt.Enter(); | |||
| var result = control_flow_ops._Enter( | |||
| grad, grad_ctxt.name, is_constant: false, | |||
| grad, grad_ctxt.Name, is_constant: false, | |||
| parallel_iterations: grad_ctxt.parallel_iterations, | |||
| name: "b_exit"); | |||
| @@ -14,8 +14,10 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Operations; | |||
| using static Tensorflow.Binding; | |||
| @@ -168,10 +170,28 @@ namespace Tensorflow.Gradients | |||
| var x = op.inputs[0]; | |||
| var y = op.inputs[1]; | |||
| var grad = grads[0]; | |||
| if (grad is Tensor && | |||
| if (op is EagerOperation op_eager && | |||
| op_eager.SkipInputIndices.Contains(1) && | |||
| y.NDims == 0) | |||
| { | |||
| return new Tensor[] | |||
| { | |||
| gen_math_ops.mul(grad, math_ops.conj(y)), | |||
| null | |||
| }; | |||
| } | |||
| if (grad is Tensor && | |||
| _ShapesFullySpecifiedAndEqual(x, y, grad) && | |||
| new TF_DataType[] { tf.int32, tf.float32 }.Contains(grad.dtype)) | |||
| return new Tensor[] { gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) }; | |||
| { | |||
| return new Tensor[] | |||
| { | |||
| gen_math_ops.mul(grad, y), | |||
| gen_math_ops.mul(grad, x) | |||
| }; | |||
| } | |||
| var (sx, sy) = SmartBroadcastGradientArgs(x, y); | |||
| var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); | |||
| @@ -179,15 +199,39 @@ namespace Tensorflow.Gradients | |||
| x = math_ops.conj(x); | |||
| y = math_ops.conj(y); | |||
| var mul1 = gen_math_ops.mul(grad, y); | |||
| var reduce_sum1 = math_ops.reduce_sum(mul1, rx); | |||
| var reshape1 = gen_array_ops.reshape(reduce_sum1, sx); | |||
| Tensor gx = null, gy = null; | |||
| var mul2 = gen_math_ops.mul(x, grad); | |||
| var reduce_sum2 = math_ops.reduce_sum(mul2, ry); | |||
| var reshape2 = gen_array_ops.reshape(reduce_sum2, sy); | |||
| if (op is EagerOperation op_eager1 && | |||
| op_eager1.SkipInputIndices.Contains(0)) | |||
| { | |||
| // Gradient w.r.t. x is not needed, so gx stays null. | |||
| } | |||
| // else if not must_reduce_x: | |||
| // gx = gen_math_ops.mul(grad, y) | |||
| else | |||
| { | |||
| gx = array_ops.reshape( | |||
| math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx); | |||
| } | |||
| return new Tensor[] { reshape1, reshape2 }; | |||
| if (op is EagerOperation op_eager2 && | |||
| op_eager2.SkipInputIndices.Contains(1)) | |||
| { | |||
| } | |||
| // else if not must_reduce_y: | |||
| // gy = gen_math_ops.mul(x, grad) | |||
| else | |||
| { | |||
| gy = array_ops.reshape( | |||
| math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy); | |||
| } | |||
| return new Tensor[] { gx, gy }; | |||
| } | |||
| [RegisterGradient("MatMul")] | |||
| @@ -266,11 +310,23 @@ namespace Tensorflow.Gradients | |||
| var input_shape = op.inputs[0]._shape_tuple(); | |||
| var output_shape = op.outputs[0]._shape_tuple(); | |||
| var input_shape_tensor = array_ops.shape(op.inputs[0]); | |||
| var output_shape_tensor = array_ops.shape(op.outputs[0]); | |||
| var factor = _safe_shape_div(math_ops.reduce_prod(input_shape_tensor), math_ops.reduce_prod(output_shape_tensor)); | |||
| if(input_shape != null && | |||
| output_shape != null) | |||
| { | |||
| var input_size = np.prod(input_shape); | |||
| var output_size = np.prod(output_shape); | |||
| var factor = (int)input_size / Math.Max((int)output_size, 1); | |||
| var factor_tensor = constant_op.constant(factor, dtype: sum_grad.dtype); | |||
| return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor_tensor, sum_grad.dtype)), null }; | |||
| } | |||
| else | |||
| { | |||
| var input_shape_tensor = array_ops.shape(op.inputs[0]); | |||
| var output_shape_tensor = array_ops.shape(op.outputs[0]); | |||
| var factor = _safe_shape_div(math_ops.reduce_prod(input_shape_tensor), math_ops.reduce_prod(output_shape_tensor)); | |||
| return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), null }; | |||
| return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), null }; | |||
| } | |||
| } | |||
| /// <summary> | |||
| @@ -438,8 +494,18 @@ namespace Tensorflow.Gradients | |||
| var rank = input_0_shape.Length; | |||
| if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>())) | |||
| { | |||
| var new_shape = range(rank).Select(x => 1).ToArray(); | |||
| grad = array_ops.reshape(grad, new_shape); | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| // should add ones_rank_cache | |||
| var new_shape_tensor = constant_op.constant(range(rank).Select(x => 1).ToArray(), dtype: TF_DataType.TF_INT32); | |||
| grad = array_ops.reshape(grad, new_shape_tensor); | |||
| } | |||
| else | |||
| { | |||
| var new_shape = range(rank).Select(x => 1).ToArray(); | |||
| grad = array_ops.reshape(grad, new_shape); | |||
| } | |||
| // If shape is not fully defined (but rank is), we use Shape. | |||
| if (!input_0_shape.Contains(-1)) | |||
| input_shape = constant_op.constant(input_0_shape); | |||
| @@ -447,7 +513,11 @@ namespace Tensorflow.Gradients | |||
| input_shape = array_ops.shape(op.inputs[0]); | |||
| return new Tensor[] { gen_array_ops.tile(grad, input_shape), null }; | |||
| } | |||
| } | |||
| else | |||
| { | |||
| } | |||
| } | |||
| } | |||
| input_shape = array_ops.shape(op.inputs[0]); | |||
| @@ -605,6 +675,20 @@ namespace Tensorflow.Gradients | |||
| var grad = grads[0]; | |||
| var x = op.inputs[0]; | |||
| var y = op.inputs[1]; | |||
| if (op is EagerOperation op_eager && | |||
| op_eager.SkipInputIndices.Contains(1) && | |||
| y.NDims == 0) | |||
| { | |||
| x = math_ops.conj(x); | |||
| y = math_ops.conj(y); | |||
| return new Tensor[] | |||
| { | |||
| grad * y * math_ops.pow(x, y - 1), | |||
| null | |||
| }; | |||
| } | |||
| var z = op.outputs[0]; | |||
| var (sx, sy) = SmartBroadcastGradientArgs(x, y); | |||
| @@ -0,0 +1,74 @@ | |||
| /***************************************************************************** | |||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Operations; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Gradients | |||
| { | |||
| /// <summary> | |||
| /// Gradients for operators defined in math_ops.py. | |||
| /// </summary> | |||
| [RegisterGradientEager("math_grad")] | |||
| public class math_grad_eager | |||
| { | |||
| [RegisterGradientEager("Mul")] | |||
| public static EagerTensor[] _MulGrad(EagerOperation op, IntPtr[] grads) | |||
| { | |||
| var x = op.InputHandles[0]; | |||
| var y = op.InputHandles[1]; | |||
| var grad = grads[0]; | |||
| if (op.SkipInputIndices.Contains(1) && | |||
| EagerTensor.GetRank(grad) == 0) | |||
| { | |||
| return new EagerTensor[] | |||
| { | |||
| null,//gen_math_ops.mul(grad, math_ops.conj(y)), | |||
| null | |||
| }; | |||
| } | |||
| if (_ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
| { | |||
| return new EagerTensor[] | |||
| { | |||
| gen_math_ops.mul(grad, y), | |||
| gen_math_ops.mul(grad, x) | |||
| }; | |||
| } | |||
| throw new NotImplementedException(""); | |||
| } | |||
| public static bool _ShapesFullySpecifiedAndEqual(IntPtr x, IntPtr y, IntPtr grad) | |||
| { | |||
| var x_shape = EagerTensor.GetDims(x); | |||
| var y_shape = EagerTensor.GetDims(y); | |||
| var grad_shape = EagerTensor.GetDims(grad); | |||
| return x_shape != null && | |||
| y_shape != null && | |||
| Enumerable.SequenceEqual(x_shape, y_shape) && | |||
| Enumerable.SequenceEqual(y_shape, grad_shape) && | |||
| !x_shape.Contains(-1); | |||
| } | |||
| } | |||
| } | |||
| @@ -24,9 +24,9 @@ namespace Tensorflow | |||
| { | |||
| public partial class ops | |||
| { | |||
| static Dictionary<string, Func<Operation, Tensor[], Tensor[]>> gradientFunctions = null; | |||
| public static Dictionary<string, Func<Operation, Tensor[], Tensor[]>> gradientFunctions = null; | |||
| private static void RegisterFromAssembly() | |||
| public static void RegisterFromAssembly() | |||
| { | |||
| if (gradientFunctions == null) | |||
| { | |||
| @@ -0,0 +1,101 @@ | |||
| /***************************************************************************** | |||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Reflection; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Gradients; | |||
| namespace Tensorflow | |||
| { | |||
| public partial class ops | |||
| { | |||
| public static Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>> gradientFunctionsEager = null; | |||
| public static void RegisterFromAssemblyEager() | |||
| { | |||
| if (gradientFunctionsEager == null) | |||
| { | |||
| gradientFunctionsEager = new Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>>(); | |||
| var gradGroups = Assembly.GetExecutingAssembly() | |||
| .GetTypes() | |||
| .Where(x => x.GetCustomAttribute<RegisterGradientEager>() != null) | |||
| .ToArray(); | |||
| foreach (var g in gradGroups) | |||
| { | |||
| var methods = g.GetMethods() | |||
| .Where(x => x.GetCustomAttribute<RegisterGradientEager>() != null) | |||
| .ToArray(); | |||
| foreach (var m in methods) | |||
| { | |||
| RegisterGradientFunctionEager(m.GetCustomAttribute<RegisterGradientEager>().Name, | |||
| (oper, out_grads) => | |||
| g.InvokeMember(m.Name, | |||
| BindingFlags.InvokeMethod, | |||
| null, | |||
| null, | |||
| args: new object[] { oper, out_grads }) as EagerTensor[] | |||
| ); | |||
| } | |||
| // REGISTER_NO_GRADIENT_OP | |||
| methods = g.GetMethods() | |||
| .Where(x => x.GetCustomAttribute<RegisterNoGradient>() != null) | |||
| .ToArray(); | |||
| foreach (var m in methods) | |||
| RegisterNoGradientFunctionEager(m.GetCustomAttribute<RegisterNoGradient>().Name); | |||
| } | |||
| } | |||
| } | |||
| /// <summary> | |||
| /// Registers a new eager-mode gradient function | |||
| /// </summary> | |||
| /// <param name="name">operation type</param> | |||
| /// <param name="func">function delegate</param> | |||
| public static void RegisterGradientFunctionEager(string name, Func<EagerOperation, IntPtr[], EagerTensor[]> func) | |||
| { | |||
| RegisterFromAssemblyEager(); | |||
| gradientFunctionsEager[name] = func; | |||
| } | |||
| public static void RegisterNoGradientFunctionEager(string name) | |||
| { | |||
| RegisterFromAssemblyEager(); | |||
| gradientFunctionsEager[name] = null; | |||
| } | |||
| public static Func<EagerOperation, IntPtr[], EagerTensor[]> get_gradient_function_eager(EagerOperation op) | |||
| { | |||
| if (op.inputs == null) return null; | |||
| RegisterFromAssemblyEager(); | |||
| if (!gradientFunctionsEager.ContainsKey(op.type)) | |||
| throw new LookupError($"can't get graident function through get_gradient_function {op.type}"); | |||
| return gradientFunctionsEager[op.type]; | |||
| } | |||
| } | |||
| } | |||
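| // Lookup sketch (illustrative only), mirroring the graph-mode gradientFunctions map: | |||
| //   ops.RegisterFromAssemblyEager();                        // idempotent; builds the map once | |||
| //   var grad_fn = ops.get_gradient_function_eager(op_eager); | |||
| //   EagerTensor[] in_grads = grad_fn(op_eager, out_grad_handles); | |||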
| @@ -0,0 +1,32 @@ | |||
| /***************************************************************************** | |||
| Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Gradients | |||
| { | |||
| [RegisterGradient("resource_variable_grad")] | |||
| public class resource_variable_grad | |||
| { | |||
| [RegisterGradient("ReadVariableOp")] | |||
| public static Tensor[] _ReadGrad(Operation op, Tensor[] grads) | |||
| { | |||
| return new Tensor[] { grads[0] }; | |||
| } | |||
| } | |||
| } | |||
| @@ -444,7 +444,7 @@ namespace Tensorflow | |||
| var collection = _collections.ContainsKey(name) ? _collections[name] : new List<T>(); | |||
| switch (collection) | |||
| { | |||
| case List<VariableV1> list: | |||
| case List<IVariableV1> list: | |||
| t = list.Select(x => (T)(object)x).ToList(); | |||
| break; | |||
| case List<ResourceVariable> list: | |||
| @@ -37,8 +37,8 @@ namespace Tensorflow.Keras.Layers | |||
| private IInitializer gamma_initializer; | |||
| private IInitializer moving_mean_initializer; | |||
| private IInitializer moving_variance_initializer; | |||
| private VariableV1 gamma; | |||
| private VariableV1 beta; | |||
| private IVariableV1 gamma; | |||
| private IVariableV1 beta; | |||
| private RefVariable moving_mean; | |||
| private RefVariable moving_variance; | |||
| @@ -23,7 +23,7 @@ namespace Tensorflow.Keras.Layers | |||
| private int input_dim; | |||
| private int output_dim; | |||
| private bool mask_zero; | |||
| public VariableV1 embeddings; | |||
| public IVariableV1 embeddings; | |||
| public IInitializer embeddings_initializer; | |||
| int input_length; | |||
| @@ -51,8 +51,8 @@ namespace Tensorflow.Keras.Layers | |||
| /// </summary> | |||
| protected InputSpec input_spec; | |||
| protected bool supports_masking; | |||
| protected List<VariableV1> _trainable_weights; | |||
| protected List<VariableV1> _non_trainable_weights; | |||
| protected List<IVariableV1> _trainable_weights; | |||
| protected List<IVariableV1> _non_trainable_weights; | |||
| private string _name; | |||
| public string name => _name; | |||
| protected string _base_name; | |||
| @@ -84,8 +84,8 @@ namespace Tensorflow.Keras.Layers | |||
| this.supports_masking = false; | |||
| _init_set_name(name); | |||
| _trainable_weights = new List<VariableV1>(); | |||
| _non_trainable_weights = new List<VariableV1>(); | |||
| _trainable_weights = new List<IVariableV1>(); | |||
| _non_trainable_weights = new List<IVariableV1>(); | |||
| _compute_previous_mask = false; | |||
| _updates = new List<Operation>(); | |||
| @@ -207,12 +207,12 @@ namespace Tensorflow.Keras.Layers | |||
| built = true; | |||
| } | |||
| protected virtual VariableV1 add_weight(string name, | |||
| protected virtual IVariableV1 add_weight(string name, | |||
| int[] shape, | |||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||
| IInitializer initializer = null, | |||
| bool? trainable = null, | |||
| Func<string, int[], TF_DataType, IInitializer, bool, VariableV1> getter = null) | |||
| Func<string, int[], TF_DataType, IInitializer, bool, IVariableV1> getter = null) | |||
| { | |||
| if (dtype == TF_DataType.DtInvalid) | |||
| dtype = TF_DataType.TF_FLOAT; | |||
| @@ -0,0 +1,25 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow.Keras.Optimizers | |||
| { | |||
| public class DeviceDType : IEqualityComparer<DeviceDType> | |||
| { | |||
| public string Device { get; set; } | |||
| public TF_DataType DType { get; set; } | |||
| public bool Equals(DeviceDType x, DeviceDType y) | |||
| { | |||
| return x.ToString() == y.ToString(); | |||
| } | |||
| public int GetHashCode(DeviceDType obj) | |||
| { | |||
| return 0; | |||
| } | |||
| public override string ToString() | |||
| => $"{Device}, {DType}"; | |||
| } | |||
| } | |||
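| // Usage note (illustrative): the comparer de-duplicates (device, dtype) pairs, e.g. | |||
| //   var keys = var_list.Select(v => new DeviceDType { Device = v.Device, DType = v.dtype.as_base_dtype() }) | |||
| //                      .Distinct(new DeviceDType()).ToArray(); | |||
| // GetHashCode returns a constant, so Distinct always falls back to Equals (string comparison). | |||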
| @@ -1,7 +1,12 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow.Keras.Utils; | |||
| using Tensorflow.Train; | |||
| using static Tensorflow.Binding; | |||
| using Tensorflow; | |||
| using Tensorflow.Eager; | |||
| namespace Tensorflow.Keras.Optimizers | |||
| { | |||
| @@ -10,5 +15,209 @@ namespace Tensorflow.Keras.Optimizers | |||
| /// </summary> | |||
| public class OptimizerV2 : Trackable, IOptimizer | |||
| { | |||
| protected bool _hypers_created; | |||
| protected virtual string _name { get; } | |||
| ResourceVariable _iterations; | |||
| List<ResourceVariable> _weight; | |||
| Dictionary<string, float> _hyper; | |||
| Dictionary<string, ResourceVariable> _hyper_variables; | |||
| protected bool _momentum; | |||
| protected float _initial_decay = 0.0f; | |||
| protected bool _use_locking = true; | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> apply_state; | |||
| public OptimizerV2() : base() | |||
| { | |||
| _weight = new List<ResourceVariable>(); | |||
| _hyper = new Dictionary<string, float>(); | |||
| _hyper_variables = new Dictionary<string, ResourceVariable>(); | |||
| apply_state = new Dictionary<DeviceDType, Dictionary<string, Tensor>>(); | |||
| } | |||
| /// <summary> | |||
| /// Apply gradients to variables. | |||
| /// </summary> | |||
| /// <param name="grads_and_vars"></param> | |||
| /// <param name="name"></param> | |||
| /// <param name="experimental_aggregate_gradients"></param> | |||
| public void apply_gradients(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars, | |||
| string name = null, | |||
| bool experimental_aggregate_gradients = true) | |||
| { | |||
| var var_list = grads_and_vars.Select(x => x.Item2).ToArray(); | |||
| tf_with(ops.name_scope(_name), delegate | |||
| { | |||
| ops.init_scope(); | |||
| _create_all_weights(var_list); | |||
| if (grads_and_vars == null || grads_and_vars.Count() == 0) | |||
| return control_flow_ops.no_op(); | |||
| apply_state = _prepare(var_list); | |||
| if(experimental_aggregate_gradients) | |||
| { | |||
| // var reduced_grads = _aggregate_gradients(grads_and_vars); | |||
| _distributed_apply(grads_and_vars, name, apply_state); | |||
| } | |||
| return null; | |||
| }); | |||
| } | |||
| void apply_grad_to_update_var(ResourceVariable var, EagerTensor grad) | |||
| { | |||
| _resource_apply_dense(var, grad, apply_state); | |||
| } | |||
| protected virtual Operation _resource_apply_dense(ResourceVariable var, | |||
| EagerTensor grad, | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state) | |||
| { | |||
| throw new NotImplementedException("_resource_apply_dense"); | |||
| } | |||
| void _distributed_apply(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars, | |||
| string name, | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state) | |||
| { | |||
| tf_with(ops.name_scope(name, "", new { skip_on_eager = true }), delegate | |||
| { | |||
| foreach(var (grad, var) in grads_and_vars) | |||
| { | |||
| tf_with(ops.name_scope("update"), delegate | |||
| { | |||
| apply_grad_to_update_var(var, grad as EagerTensor); | |||
| }); | |||
| } | |||
| _iterations.assign_add(ops.convert_to_tensor(1, dtype: _iterations.dtype)); | |||
| }); | |||
| } | |||
| Tensor[] _aggregate_gradients(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars) | |||
| { | |||
| return grads_and_vars.Select(x => x.Item1).ToArray(); | |||
| } | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> _prepare(ResourceVariable[] var_list) | |||
| { | |||
| var _apply_state = new Dictionary<DeviceDType, Dictionary<string, Tensor>>(); | |||
| var keys = var_list.Select(x => new DeviceDType | |||
| { | |||
| Device = x.Device, | |||
| DType = x.dtype.as_base_dtype() | |||
| }).Distinct(new DeviceDType()).ToArray(); | |||
| foreach(var device_dtype in keys) | |||
| { | |||
| _apply_state[device_dtype] = new Dictionary<string, Tensor>(); | |||
| _prepare_local(device_dtype, _apply_state); | |||
| } | |||
| return _apply_state; | |||
| } | |||
| protected virtual void _prepare_local(DeviceDType device_dtype, | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state) | |||
| { | |||
| if (_hyper.ContainsKey("learning_rate")) | |||
| { | |||
| var lr_t = array_ops.identity(_decayed_lr(device_dtype.DType)); | |||
| _apply_state[device_dtype]["lr_t"] = lr_t; | |||
| } | |||
| } | |||
| Tensor _decayed_lr(TF_DataType var_dtype) | |||
| { | |||
| var lr_t = _get_hyper("learning_rate", var_dtype); | |||
| if(_initial_decay > 0.0f) | |||
| { | |||
| throw new NotImplementedException(""); | |||
| } | |||
| return lr_t; | |||
| } | |||
| protected ResourceVariable _get_hyper(string name, TF_DataType dtype = TF_DataType.DtInvalid) | |||
| { | |||
| var value = _hyper_variables[name]; | |||
| return math_ops.cast(value, dtype); | |||
| } | |||
| void _create_all_weights(ResourceVariable[] var_list) | |||
| { | |||
| if(_iterations == null) | |||
| { | |||
| _iterations = add_weight("iter", | |||
| shape: new int[0], | |||
| dtype: TF_DataType.TF_INT64, | |||
| trainable: false, | |||
| aggregation: VariableAggregation.OnlyFirstReplica); | |||
| _weight.Add(_iterations); | |||
| } | |||
| _create_hypers(); | |||
| _create_slots(var_list); | |||
| } | |||
| protected void _set_hyper(string name, float value) | |||
| { | |||
| _hyper[name] = value; | |||
| } | |||
| void _create_hypers() | |||
| { | |||
| if (_hypers_created) | |||
| return; | |||
| foreach (var dict in _hyper) | |||
| { | |||
| var name = dict.Key; | |||
| var value = dict.Value; | |||
| _hyper_variables[name] = add_weight( | |||
| name, | |||
| shape: new int[0], | |||
| trainable: false, | |||
| initializer: tf.constant_initializer(value), | |||
| aggregation: VariableAggregation.OnlyFirstReplica); | |||
| } | |||
| _hypers_created = true; | |||
| } | |||
| void _create_slots(ResourceVariable[] var_list) | |||
| { | |||
| if(_momentum) | |||
| { | |||
| /*for var in var_list: | |||
| self.add_slot(var, "momentum")*/ | |||
| } | |||
| } | |||
| ResourceVariable add_weight(string name, | |||
| TensorShape shape, | |||
| TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
| IInitializer initializer = null, | |||
| bool trainable = false, | |||
| VariableSynchronization synchronization = VariableSynchronization.Auto, | |||
| VariableAggregation aggregation = VariableAggregation.None) | |||
| { | |||
| if (initializer == null) | |||
| initializer = tf.zeros_initializer; | |||
| if (dtype == TF_DataType.DtInvalid) | |||
| dtype = TF_DataType.TF_FLOAT; | |||
| var variable = _add_variable_with_custom_getter(name: name, | |||
| shape: shape, | |||
| getter: base_layer_utils.make_variable, | |||
| dtype: dtype, | |||
| overwrite: true, | |||
| initializer: initializer, | |||
| trainable: trainable, | |||
| use_resource: true, | |||
| synchronization: synchronization, | |||
| aggregation: aggregation); | |||
| return variable as ResourceVariable; | |||
| } | |||
| } | |||
| } | |||
| @@ -1,14 +1,53 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| namespace Tensorflow.Keras.Optimizers | |||
| { | |||
| public class SGD | |||
| public class SGD : OptimizerV2 | |||
| { | |||
| public SGD(float learning_rate) | |||
| protected override string _name => "SGD"; | |||
| bool nesterov; | |||
| public SGD(float learning_rate, | |||
| float momentum = 0.0f, | |||
| bool nesterov = false, | |||
| float decay = 0.0f) : base() | |||
| { | |||
| _set_hyper("learning_rate", learning_rate); | |||
| _set_hyper("decay", decay); | |||
| _momentum = momentum > 0; | |||
| _set_hyper("momentum", momentum); | |||
| this.nesterov = nesterov; | |||
| } | |||
| protected override void _prepare_local(DeviceDType device_dtype, | |||
| Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state) | |||
| { | |||
| base._prepare_local(device_dtype, _apply_state); | |||
| _apply_state[device_dtype]["momentum"] = array_ops.identity( | |||
| _get_hyper("momentum", device_dtype.DType)); | |||
| } | |||
| protected override Operation _resource_apply_dense(ResourceVariable var, EagerTensor grad, Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state) | |||
| { | |||
| if (_momentum) | |||
| { | |||
| throw new NotImplementedException("_resource_apply_dense"); | |||
| } | |||
| var device_dtype = _apply_state.Keys.FirstOrDefault(x => x.Device == var.Device && x.DType == var.dtype.as_base_dtype()); | |||
| return gen_training_ops.resource_apply_gradient_descent(var.Handle as EagerTensor, | |||
| _apply_state[device_dtype]["lr_t"] as EagerTensor, | |||
| grad, | |||
| use_locking: _use_locking); | |||
| } | |||
| } | |||
| } | |||
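| // Illustrative end-to-end sketch (not part of this change); grads_and_vars pairs each | |||
| // gradient Tensor with the ResourceVariable it updates: | |||
| //   var opt = new SGD(learning_rate: 0.01f); | |||
| //   opt.apply_gradients(new[] { (grad, variable) }); | |||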
| @@ -32,7 +32,7 @@ namespace Tensorflow.Keras.Utils | |||
| /// <param name="initializer"></param> | |||
| /// <param name="trainable"></param> | |||
| /// <returns></returns> | |||
| public static VariableV1 make_variable(string name, | |||
| public static IVariableV1 make_variable(string name, | |||
| int[] shape, | |||
| TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
| IInitializer initializer = null, | |||
| @@ -46,7 +46,7 @@ namespace Tensorflow.Keras.Utils | |||
| Func<Tensor> init_val = () => initializer.call(new TensorShape(shape), dtype: dtype); | |||
| var variable_dtype = dtype.as_base_dtype(); | |||
| var v = tf.Variable(init_val, | |||
| var v = tf.Variable(init_val, | |||
| dtype: dtype, | |||
| shape: shape, | |||
| name: name); | |||
| @@ -42,14 +42,14 @@ namespace Tensorflow.Keras | |||
| /// Allows giving unique autogenerated names to layers, in a graph-specific way. | |||
| /// </summary> | |||
| public static Dictionary<Graph, Dictionary<(string, string), int>> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); | |||
| public static Dictionary<string, VariableV1> _GRAPH_VARIABLES = new Dictionary<string, VariableV1>(); | |||
| public static Dictionary<string, IVariableV1> _GRAPH_VARIABLES = new Dictionary<string, IVariableV1>(); | |||
| public static Dictionary<string, Optimizer> _GRAPH_TF_OPTIMIZERS = new Dictionary<string, Optimizer>(); | |||
| public static _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph(); | |||
| public static void track_variable(VariableV1 v) | |||
| public static void track_variable(IVariableV1 v) | |||
| { | |||
| var graph = v.graph; | |||
| var graph = v.Graph; | |||
| _GRAPH_VARIABLES[graph.graph_key] = v; | |||
| } | |||
| @@ -42,8 +42,8 @@ namespace Tensorflow.Layers | |||
| this._reuse = _reuse; | |||
| // Avoid an incorrect lint error | |||
| _trainable_weights = new List<VariableV1>(); | |||
| _non_trainable_weights = new List<VariableV1>(); | |||
| _trainable_weights = new List<IVariableV1>(); | |||
| _non_trainable_weights = new List<IVariableV1>(); | |||
| this.built = false; | |||
| _keras_style = false; | |||
| } | |||
| @@ -116,7 +116,7 @@ namespace Tensorflow.Layers | |||
| /// <param name="synchronization"></param> | |||
| /// <param name="aggregation"></param> | |||
| /// <returns></returns> | |||
| protected virtual VariableV1 add_weight(string name, | |||
| protected virtual IVariableV1 add_weight(string name, | |||
| int[] shape, | |||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||
| IInitializer initializer = null, | |||
| @@ -126,7 +126,7 @@ namespace Tensorflow.Layers | |||
| { | |||
| var default_graph = ops.get_default_graph(); | |||
| Graph init_graph = null; | |||
| VariableV1[] existing_variables = null; | |||
| IVariableV1[] existing_variables = null; | |||
| if (synchronization == VariableSynchronization.OnRead) | |||
| trainable = false; | |||
| @@ -77,7 +77,7 @@ namespace Tensorflow.Operations | |||
| _external_values = new Dictionary<string, ITensorOrOperation>(); | |||
| } | |||
| public string name { get => _name; } | |||
| public string Name { get => _name; } | |||
| protected string _name; | |||
| public void __init__(ValuesDef values_def = null, string import_scope = null) | |||
| @@ -141,7 +141,7 @@ namespace Tensorflow.Operations.ControlFlows | |||
| parallel_iterations: forward_ctxt.parallel_iterations, | |||
| back_prop: forward_ctxt.back_prop, | |||
| swap_memory: forward_ctxt.swap_memory, | |||
| name: forward_ctxt.name, | |||
| name: forward_ctxt.Name, | |||
| grad_state: this); | |||
| _grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state); | |||
| if (outer_forward_ctxt != null) | |||
| @@ -21,8 +21,8 @@ namespace Tensorflow | |||
| bool _state_is_tuple; | |||
| IActivation _activation; | |||
| LSTMStateTuple _state; | |||
| VariableV1 _kernel; | |||
| VariableV1 _bias; | |||
| IVariableV1 _kernel; | |||
| IVariableV1 _bias; | |||
| string _WEIGHTS_VARIABLE_NAME = "kernel"; | |||
| string _BIAS_VARIABLE_NAME = "bias"; | |||
| @@ -28,9 +28,9 @@ namespace Tensorflow | |||
| public override object state_size => _num_units; | |||
| public override int output_size => _num_units; | |||
| public VariableV1 _kernel; | |||
| public IVariableV1 _kernel; | |||
| string _WEIGHTS_VARIABLE_NAME = "kernel"; | |||
| public VariableV1 _bias; | |||
| public IVariableV1 _bias; | |||
| string _BIAS_VARIABLE_NAME = "bias"; | |||
| public BasicRnnCell(int num_units, | |||
| @@ -14,6 +14,8 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| @@ -466,10 +468,15 @@ namespace Tensorflow.Operations | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Relu", name, null, | |||
| features); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Relu", name, new IntPtr[] | |||
| { | |||
| features as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features }); | |||
| @@ -480,10 +487,15 @@ namespace Tensorflow.Operations | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Tanh", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Tanh", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Tanh", name: name, args: new { x }); | |||
| @@ -40,9 +40,9 @@ namespace Tensorflow | |||
| public int NumInputs => c_api.TF_OperationNumInputs(_handle); | |||
| private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); | |||
| private InputList _inputs_val; | |||
| protected InputList _inputs_val; | |||
| public InputList inputs | |||
| public virtual InputList inputs | |||
| { | |||
| get | |||
| { | |||
| @@ -38,8 +38,8 @@ namespace Tensorflow | |||
| return num; | |||
| } | |||
| private Tensor[] _outputs; | |||
| public Tensor[] outputs => _outputs; | |||
| protected Tensor[] _outputs; | |||
| public virtual Tensor[] outputs => _outputs; | |||
| public Tensor output => _outputs.FirstOrDefault(); | |||
| public int NumControlOutputs => c_api.TF_OperationNumControlOutputs(_handle); | |||
| @@ -64,6 +64,7 @@ namespace Tensorflow | |||
| bool _is_stateful; | |||
| public NodeDef node_def | |||
| { | |||
| get | |||
| @@ -226,6 +226,21 @@ namespace Tensorflow | |||
| private static Tensor expand_dims_v2(Tensor input, int axis, string name = null) | |||
| => gen_array_ops.expand_dims(input, axis, name); | |||
| /// <summary> | |||
| /// Creates a tensor filled with a scalar value. | |||
| /// This operation creates a tensor of shape `dims` and fills it with `value`. | |||
| /// </summary> | |||
| /// <param name="dims">A 1-D sequence of non-negative numbers.</param> | |||
| /// <param name="value">A value to fill the returned `tf.Tensor`.</param> | |||
| /// <param name="name">Optional string. The name of the output `tf.Tensor`.</param> | |||
| /// <returns>A `tf.Tensor` with shape `dims` and the same dtype as `value`.</returns> | |||
| public static Tensor fill(Tensor dims, Tensor value, string name = null) | |||
| { | |||
| var result = gen_array_ops.fill(dims, value, name: name); | |||
| // tensor_util.maybe_set_static_shape(result, dims) | |||
| return result; | |||
| } | |||
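| // Illustrative example: with dims = [2, 3] and value = 1f, fill returns a 2x3 tensor of ones; | |||
| // the rewritten ones() below now routes through this helper. | |||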
| /// <summary> | |||
| /// Returns the rank of a tensor. | |||
| /// </summary> | |||
| @@ -312,20 +327,26 @@ namespace Tensorflow | |||
| }); | |||
| } | |||
| public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||
| => tf_with(ops.name_scope(name, "ones", new { dims }), scope => | |||
| public static Tensor ones(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||
| => tf_with(ops.name_scope(name, "ones", shape), scope => | |||
| { | |||
| dtype = dtype.as_base_dtype(); | |||
| name = scope; | |||
| var shape_tensor = constant_op._tensor_shape_tensor_conversion_function(shape); | |||
| Tensor ones = null; | |||
| switch (dtype) | |||
| { | |||
| case TF_DataType.TF_DOUBLE: | |||
| return _constant_if_small(1.0d, dims, dtype, name); | |||
| ones = constant(1.0d); | |||
| break; | |||
| case TF_DataType.TF_FLOAT: | |||
| return _constant_if_small(1.0f, dims, dtype, name); | |||
| ones = constant(1.0f); | |||
| break; | |||
| default: | |||
| return _constant_if_small(1, dims, dtype, name); | |||
| ones = constant(1); | |||
| break; | |||
| } | |||
| return fill(shape_tensor, ones, name: name); | |||
| }); | |||
| public static Tensor one_hot(Tensor indices, int depth, | |||
| @@ -61,7 +61,7 @@ namespace Tensorflow | |||
| /// <param name="name"></param> | |||
| /// <param name="max_norm"></param> | |||
| /// <returns></returns> | |||
| public static Tensor _embedding_lookup_and_transform(VariableV1 @params, | |||
| public static Tensor _embedding_lookup_and_transform(IVariableV1 @params, | |||
| Tensor ids, | |||
| string partition_strategy = "mod", | |||
| string name = null, | |||
| @@ -131,7 +131,7 @@ namespace Tensorflow | |||
| max_norm: max_norm); | |||
| } | |||
| public static Tensor embedding_lookup(VariableV1 @params, Tensor ids, | |||
| public static Tensor embedding_lookup(IVariableV1 @params, Tensor ids, | |||
| string partition_strategy = "mod", | |||
| string name = null, | |||
| bool validate_indices = true, | |||
| @@ -54,17 +54,27 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| try | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "ConcatV2", name, null, | |||
| values, axis); | |||
| return _result; | |||
| } | |||
| catch (Exception) | |||
| { | |||
| return concat_v2_eager_fallback(values, axis, name, tf.context); | |||
| } | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "ConcatV2", name, new IntPtr[] | |||
| { | |||
| values as EagerTensor, | |||
| axis as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||
| return _op.output; | |||
| } | |||
| public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| return concat_v2_eager_fallback(values, axis, name, tf.context); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||
| @@ -79,7 +89,7 @@ namespace Tensorflow | |||
| var _inputs_flat = input.concat(axis1); | |||
| var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; | |||
| return _execute.execute(ctx, "ConcatV2", _inputs_flat, _attrs, name: name); | |||
| return _execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0]; | |||
| } | |||
| public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) | |||
| @@ -152,8 +162,14 @@ namespace Tensorflow | |||
| { | |||
| if(tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, "Pack", name, null, values, "axis", axis); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Pack", name, | |||
| values.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), values.Length, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "axis", axis), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis }); | |||
| @@ -170,8 +186,6 @@ namespace Tensorflow | |||
| _attrs["dtype"] = _op.get_attr("dtype"); | |||
| _attrs["shape"] = _op.get_attr("shape"); | |||
| _execute.record_gradient("Placeholder", _inputs_flat, _attrs, _result, name); | |||
| return new Tensor(_op, 0, dtype); | |||
| } | |||
| @@ -214,6 +228,19 @@ namespace Tensorflow | |||
| /// <param name="name"></param> | |||
| public static Tensor identity(Tensor input, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Identity", name, new IntPtr[] | |||
| { | |||
| input as EagerTensor | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Identity", name, new { input }); | |||
| return _op.output; | |||
| @@ -251,10 +278,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Fill", name, null, | |||
| dims, value); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Fill", name, new IntPtr[] | |||
| { | |||
| dims as EagerTensor, | |||
| value as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Fill", name, new { dims, value }); | |||
| @@ -270,6 +303,20 @@ namespace Tensorflow | |||
| /// <returns>A tuple of `Tensor` objects (r0, r1).</returns> | |||
| public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor(), new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "BroadcastGradientArgs", name, new IntPtr[] | |||
| { | |||
| s0 as EagerTensor, | |||
| s1 as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return (results[0].Resolve(), results[1].Resolve()); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 }); | |||
| return (_op.outputs[0], _op.outputs[1]); | |||
| @@ -283,6 +330,20 @@ namespace Tensorflow | |||
| public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Reshape", name, new IntPtr[] | |||
| { | |||
| tensor as EagerTensor, | |||
| shape as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||
| return _op.output; | |||
| } | |||
| @@ -360,10 +421,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Shape", name, null, | |||
| input, "out_type", out_type); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Shape", name, new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| }, 1, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "out_type", out_type), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Shape", name, new { input, out_type }); | |||
| @@ -411,6 +478,20 @@ namespace Tensorflow | |||
| public static Tensor tile<T>(Tensor input, T multiples, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Tile", name, new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| multiples as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples }); | |||
| return _op.outputs[0]; | |||
| } | |||
| @@ -444,12 +525,24 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "StridedSlice", name, null, | |||
| input, begin, end, strides, "begin_mask", begin_mask, | |||
| "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, | |||
| "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "StridedSlice", name, new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| begin as EagerTensor, | |||
| end as EagerTensor, | |||
| strides as EagerTensor, | |||
| }, 4, | |||
| op => wrap_tfe_src.SetOpAttrs(op, | |||
| "begin_mask", begin_mask, | |||
| "end_mask", end_mask, | |||
| "ellipsis_mask", ellipsis_mask, | |||
| "new_axis_mask", new_axis_mask, | |||
| "shrink_axis_mask", shrink_axis_mask), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("StridedSlice", name, new | |||
| @@ -16,12 +16,13 @@ | |||
| using System; | |||
| using System.Linq; | |||
| using System.Runtime.InteropServices; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public static class gen_math_ops | |||
| public static partial class gen_math_ops | |||
| { | |||
| public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
| public static Execute _execute = new Execute(); | |||
| @@ -41,11 +42,35 @@ namespace Tensorflow | |||
| /// <returns></returns> | |||
| public static Tensor add_n(Tensor[] inputs, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AddN", name, | |||
| inputs.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), inputs.Length, | |||
| null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs }); | |||
| return _op.outputs[0]; | |||
| } | |||
| public static IntPtr add_n(IntPtr[] inputs, string name = null) | |||
| { | |||
| var results = new[] { c_api.TFE_NewEagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AddN", name, | |||
| inputs, inputs.Length, | |||
| null, | |||
| results, results.Length)); | |||
| status.Check(true); | |||
| return results[0]; | |||
| } | |||
| /// <summary> | |||
| /// Returns the index with the largest value across dimensions of a tensor. | |||
| /// </summary> | |||
| @@ -119,17 +144,18 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| try | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mean", name, null, | |||
| input, axis, "keep_dims", keep_dims); | |||
| return _result; | |||
| } | |||
| catch (Exception) | |||
| { | |||
| return mean_eager_fallback(input as Tensor[], axis as Tensor, keep_dims: keep_dims, name: name, ctx: tf.context); | |||
| } | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mean", name, | |||
| new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| axis as EagerTensor | |||
| }, 2, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); | |||
| @@ -137,6 +163,18 @@ namespace Tensorflow | |||
| return _op.output; | |||
| } | |||
| public static Tensor mean(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.context); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims }); | |||
| return _op.output; | |||
| } | |||
| private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) | |||
| { | |||
| var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs }); | |||
| @@ -144,7 +182,7 @@ namespace Tensorflow | |||
| var _inputs_flat = input.concat(axis1); | |||
| var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; | |||
| return _execute.execute(ctx, "Mean", _inputs_flat, _attrs, name: name); | |||
| return _execute.execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0]; | |||
| } | |||
| public static Tensor prod<T1, T2>(T1 input, T2 axis, bool keep_dims = false, string name = null) | |||
| @@ -153,10 +191,17 @@ namespace Tensorflow | |||
| { | |||
| try | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Prod", name, null, | |||
| input, axis, "keep_dims", keep_dims); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Prod", name, new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| axis as EagerTensor | |||
| }, 2, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| catch (Exception) | |||
| { | |||
| @@ -175,7 +220,7 @@ namespace Tensorflow | |||
| var _inputs_flat = input.concat(axis1); | |||
| var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; | |||
| return _execute.execute(ctx, "Prod", _inputs_flat, _attrs, name: name); | |||
| return _execute.execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0]; | |||
| } | |||
| public static Tensor acos(Tensor x, string name = null) | |||
| @@ -192,14 +237,41 @@ namespace Tensorflow | |||
| return _op.outputs[0]; | |||
| } | |||
| public static Tensor add(Tensor x, Tensor y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Add", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); | |||
| return _op.output; | |||
| } | |||
| public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Add", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Add", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); | |||
| @@ -212,10 +284,16 @@ namespace Tensorflow | |||
| // forward_compatible(2019, 6, 25): | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AddV2", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AddV2", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y }); | |||
| @@ -241,10 +319,15 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sin", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sin", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x }); | |||
| @@ -270,10 +353,15 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sigmoid", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sigmoid", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x }); | |||
| @@ -358,10 +446,15 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Tan", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Tan", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x }); | |||
| @@ -434,6 +527,20 @@ namespace Tensorflow | |||
| public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Less", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y }); | |||
| return _op.outputs[0]; | |||
| @@ -497,6 +604,19 @@ namespace Tensorflow | |||
| /// <returns> A `Tensor`. Has the same type as `x`.</returns> | |||
| public static Tensor square(Tensor x, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Square", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x }); | |||
| return _op.outputs[0]; | |||
| @@ -552,10 +672,14 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Cast", name, null, | |||
| x, "DstT", DstT, "Truncate", Truncate); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Cast", name, | |||
| new IntPtr[] { x as EagerTensor }, 1, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "DstT", DstT, "Truncate", Truncate), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); | |||
| @@ -567,10 +691,15 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Neg", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Neg", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); | |||
| @@ -582,10 +711,15 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sqrt", name, null, | |||
| x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sqrt", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| }, 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x }); | |||
| @@ -593,14 +727,41 @@ namespace Tensorflow | |||
| return _op.outputs[0]; | |||
| } | |||
| public static Tensor sub(Tensor x, Tensor y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sub", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); | |||
| return _op.output; | |||
| } | |||
| public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sub", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sub", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); | |||
| @@ -619,10 +780,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Equal", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Equal", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y }); | |||
| @@ -642,10 +809,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "NotEqual", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "NotEqual", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y }); | |||
| @@ -657,24 +830,57 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Atan2", name, null, | |||
| y, x); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Atan2", name, new IntPtr[] | |||
| { | |||
| y as EagerTensor, | |||
| x as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x }); | |||
| return _op.output; | |||
| } | |||
| public static Tensor mul(Tensor x, Tensor y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mul", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); | |||
| return _op.output; | |||
| } | |||
| public static Tensor mul<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mul", name, null, | |||
| x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mul", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor, | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); | |||
| @@ -693,8 +899,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, "", "RealDiv", name, null, x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "RealDiv", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); | |||
| @@ -711,6 +925,20 @@ namespace Tensorflow | |||
| public static Tensor floor_mod(Tensor x, Tensor y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "FloorMod", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); | |||
| return _op.outputs[0]; | |||
| @@ -720,8 +948,16 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, "", "FloorDiv", name, null, x, y); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "FloorDiv", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); | |||
| @@ -742,10 +978,20 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "MatMul", name, null, | |||
| a, b, "transpose_a", transpose_a, "transpose_b", transpose_b); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "MatMul", name, | |||
| new IntPtr[] | |||
| { | |||
| a as EagerTensor, | |||
| b as EagerTensor | |||
| }, 2, | |||
| op => wrap_tfe_src.SetOpAttrs(op, | |||
| "transpose_a", transpose_a, | |||
| "transpose_b", transpose_b), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); | |||
| @@ -839,6 +1085,20 @@ namespace Tensorflow | |||
| public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Pow", name, new IntPtr[] | |||
| { | |||
| x as EagerTensor, | |||
| y as EagerTensor | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); | |||
| return _op.outputs[0]; | |||
| @@ -848,18 +1108,18 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| try | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sum", name, null, | |||
| input, axis, "keep_dims", keep_dims); | |||
| return _result; | |||
| } | |||
| catch (Exception) | |||
| { | |||
| return _sum_eager_fallback(input as Tensor[], axis as Tensor, | |||
| keep_dims: keep_dims, name: name, ctx: tf.context); | |||
| } | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Sum", name, | |||
| new IntPtr[] | |||
| { | |||
| input as EagerTensor, | |||
| axis as EagerTensor | |||
| }, 2, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); | |||
| @@ -867,6 +1127,19 @@ namespace Tensorflow | |||
| return _op.outputs[0]; | |||
| } | |||
| public static Tensor _sum(Tensor[] inputs, Tensor axis = default, bool keep_dims = false, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| return _sum_eager_fallback(inputs, axis, | |||
| keep_dims: keep_dims, name: name, ctx: tf.context); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims }); | |||
| return _op.outputs[0]; | |||
| } | |||
| private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) | |||
| { | |||
| var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs }); | |||
| @@ -874,7 +1147,7 @@ namespace Tensorflow | |||
| var _inputs_flat = input.concat(axis1); | |||
| var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; | |||
| return _execute.execute(ctx, "Sum", _inputs_flat, _attrs, name: name); | |||
| return _execute.execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0]; | |||
| } | |||
| /// <summary> | |||
| @@ -889,8 +1162,17 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, "Range", name, null, start, limit, delta); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Range", name, new IntPtr[] | |||
| { | |||
| start as EagerTensor, | |||
| limit as EagerTensor, | |||
| delta as EagerTensor | |||
| }, 3, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); | |||
| @@ -0,0 +1,26 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public static partial class gen_math_ops | |||
| { | |||
| public static EagerTensor mul(IntPtr x, IntPtr y, string name = null) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "Mul", name, new IntPtr[] | |||
| { | |||
| x, | |||
| y, | |||
| }, 2, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| } | |||
| } | |||
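This new partial-class file gives gradient and other low-level callers a `Mul` overload that works directly on raw eager tensor handles instead of `Tensor` objects. A small usage sketch, assuming `x` and `y` are `EagerTensor` instances produced in eager mode:

```csharp
// Sketch: multiply two existing eager tensors through their raw native handles.
// The handles are the same IntPtr values the fast-path calls above pass around.
EagerTensor Multiply(EagerTensor x, EagerTensor y)
    => gen_math_ops.mul(x.EagerTensorHandle, y.EagerTensorHandle);
```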
| @@ -13,6 +13,10 @@ | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -36,6 +40,23 @@ namespace Tensorflow | |||
| if (!seed2.HasValue) | |||
| seed2 = 0; | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "RandomStandardNormal", name, new IntPtr[] | |||
| { | |||
| shape as EagerTensor, | |||
| }, 1, | |||
| op => wrap_tfe_src.SetOpAttrs(op, | |||
| "seed", seed, | |||
| "seed2", seed2, | |||
| "dtype", dtype), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("RandomStandardNormal", | |||
| name: name, | |||
| args: new { shape, dtype, seed, seed2 }); | |||
| @@ -14,6 +14,8 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| @@ -23,14 +25,66 @@ namespace Tensorflow | |||
| { | |||
| public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
| public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AssignSubVariableOp", name, | |||
| new IntPtr[] | |||
| { | |||
| resource as EagerTensor, | |||
| value as EagerTensor | |||
| }, 2, null, | |||
| null, 0)); | |||
| status.Check(true); | |||
| return null; | |||
| } | |||
| return null; | |||
| } | |||
| /// <summary> | |||
| /// Adds a value to the current value of a variable. | |||
| /// </summary> | |||
| /// <param name="resource"></param> | |||
| /// <param name="value"></param> | |||
| /// <param name="name"></param> | |||
| /// <returns></returns> | |||
| public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AssignAddVariableOp", name, | |||
| new IntPtr[] | |||
| { | |||
| resource as EagerTensor, | |||
| value as EagerTensor | |||
| }, 2, null, | |||
| null, 0)); | |||
| status.Check(true); | |||
| return null; | |||
| } | |||
| return null; | |||
| } | |||
| public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AssignVariableOp", name, null, | |||
| resource, value); | |||
| return _result; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "AssignVariableOp", name, | |||
| new IntPtr[] | |||
| { | |||
| resource as EagerTensor, | |||
| value as EagerTensor | |||
| }, 2, null, | |||
| null, 0)); | |||
| status.Check(true); | |||
| return null; | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("AssignVariableOp", name, new { resource, value }); | |||
| @@ -42,10 +96,14 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "VarIsInitializedOp", name, null, | |||
| resource); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "VarIsInitializedOp", name, | |||
| new IntPtr[] { resource as EagerTensor }, | |||
| 1, null, | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("VarIsInitializedOp", name, new { resource }); | |||
| @@ -65,12 +123,19 @@ namespace Tensorflow | |||
| public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, | |||
| string container ="", string shared_name = "", string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| if(tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "VarHandleOp", name, null, | |||
| "container", container, "shared_name", shared_name, "dtype", dtype, "shape", shape.dims); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "VarHandleOp", name, null, 0, | |||
| op => wrap_tfe_src.SetOpAttrs(op, | |||
| "container", container, | |||
| "shared_name", shared_name, | |||
| "dtype", dtype, | |||
| "shape", shape.dims), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("VarHandleOp", name, new { | |||
| @@ -94,10 +159,14 @@ namespace Tensorflow | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "ReadVariableOp", name, null, | |||
| resource, "dtype", dtype); | |||
| return _result; | |||
| var results = new[] { new EagerTensor() }; | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "ReadVariableOp", name, | |||
| new IntPtr[] { resource as EagerTensor }, 1, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "dtype", dtype), | |||
| results.Select(x => x.EagerTensorHandle).ToArray(), results.Length)); | |||
| status.Check(true); | |||
| return results[0].Resolve(); | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("ReadVariableOp", name, new | |||
| @@ -17,6 +17,7 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Framework; | |||
| using static Tensorflow.Binding; | |||
| @@ -84,6 +85,23 @@ namespace Tensorflow | |||
| }); | |||
| } | |||
| public static ResourceVariable cast(ResourceVariable x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
| { | |||
| var base_type = dtype.as_base_dtype(); | |||
| if (base_type == x.dtype) | |||
| return x; | |||
| return tf_with(ops.name_scope(name, "Cast", new { x }), scope => | |||
| { | |||
| name = scope; | |||
| var t_x = ops.convert_to_tensor(x, name: "x"); | |||
| if (t_x.dtype.as_base_dtype() != base_type) | |||
| t_x = gen_math_ops.cast(t_x, base_type, name: name); | |||
| return x; | |||
| }); | |||
| } | |||
| public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
| { | |||
| var base_type = dtype.as_base_dtype(); | |||
| @@ -540,6 +558,11 @@ namespace Tensorflow | |||
| } | |||
| else | |||
| { | |||
| if(x is EagerTensor) | |||
| { | |||
| return constant_op.constant(np.arange(x.rank)); | |||
| } | |||
| var rank = array_ops.rank(x); | |||
| return range(0, rank, 1); | |||
| } | |||
| @@ -588,7 +611,14 @@ namespace Tensorflow | |||
| => gen_math_ops.rsqrt(x, name: name); | |||
| public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) | |||
| => gen_math_ops.pow(x, y, name: name); | |||
| => tf_with(ops.name_scope(name, "Pow", new { x, y }), scope => | |||
| { | |||
| name = scope; | |||
| var x_tensor = ops.convert_to_tensor(x, name: "x"); | |||
| var y_tensor = ops.convert_to_tensor(y, name: "y", dtype: x_tensor.dtype.as_base_dtype()); | |||
| return gen_math_ops.pow(x_tensor, y_tensor, name: name); | |||
| }); | |||
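The reworked `pow` wrapper now opens a name scope and converts both operands to tensors, coercing `y` to `x`'s base dtype before dispatching to `gen_math_ops.pow`, so mixed tensor/scalar arguments resolve to a single dtype. A usage sketch under that assumption:

```csharp
// Sketch: an int exponent is converted to the float tensor's dtype before Pow runs.
var x = tf.constant(new float[] { 1f, 2f, 3f });
var squared = math_ops.pow(x, 2);   // y becomes a float32 tensor named "y" inside the "Pow" scope
```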
| public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range") | |||
| { | |||
| @@ -98,8 +98,8 @@ namespace Tensorflow | |||
| /// <param name="name"></param> | |||
| /// <returns></returns> | |||
| public static Tensor[] fused_batch_norm(Tensor x, | |||
| VariableV1 scale, | |||
| VariableV1 offset, | |||
| IVariableV1 scale, | |||
| IVariableV1 offset, | |||
| Tensor mean, | |||
| Tensor variance, | |||
| float epsilon = 0.001f, | |||
| @@ -47,6 +47,7 @@ namespace Tensorflow | |||
| var rnd = gen_random_ops.random_standard_normal(shape_tensor, dtype: dtype, seed: seed1, seed2: seed2); | |||
| var mul = rnd * stddev_tensor; | |||
| var value = math_ops.add(mul, mean_tensor, name: name); | |||
| // tensor_util.maybe_set_static_shape(value, shape) | |||
| return value; | |||
| }); | |||
| } | |||
| @@ -15,6 +15,7 @@ | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Framework; | |||
| using static Tensorflow.CppShapeInferenceResult.Types; | |||
| @@ -70,7 +71,7 @@ namespace Tensorflow | |||
| throw new NotImplementedException(); | |||
| } | |||
| public static bool is_resource_variable(VariableV1 var) | |||
| public static bool is_resource_variable(IVariableV1 var) | |||
| { | |||
| return var is ResourceVariable; | |||
| } | |||
| @@ -128,14 +129,34 @@ namespace Tensorflow | |||
| // When in eager mode, explicitly ensure so here. When in graph mode, it's | |||
| // ensured by always generating different variable names. | |||
| var exists = gen_resource_variable_ops.var_is_initialized_op(handle); | |||
| } | |||
| return handle; | |||
| // We create an assert Op instead of checking right away in order to be | |||
| // compatible with ASYNC execution mode. Further, since not all devices | |||
| // support string tensors, we encode the assertion string in the Op name | |||
| /*gen_logging_ops._assert( | |||
| math_ops.logical_not(exists), [exists], name = "EagerVariableNameReuse");*/ | |||
| var handle_data = new HandleData(); | |||
| handle_data.IsSet = true; | |||
| handle_data.ShapeAndType.Add(new HandleShapeAndType | |||
| { | |||
| Dtype = dtype.as_datatype_enum(), | |||
| Shape = shape.as_proto() | |||
| }); | |||
| _set_handle_shapes_and_types(handle, handle_data, graph_mode); | |||
| return handle; | |||
| } | |||
| } | |||
| private static void _set_handle_shapes_and_types(Tensor handle, HandleData full_handle_data, bool graph_mode) | |||
| /// <summary> | |||
| /// Sets the shape inference result HandleData on tensor. | |||
| /// </summary> | |||
| /// <param name="handle"></param> | |||
| /// <param name="full_handle_data"></param> | |||
| /// <param name="graph_mode"></param> | |||
| private static void _set_handle_shapes_and_types(Tensor handle, HandleData handle_data, bool graph_mode) | |||
| { | |||
| if (!graph_mode) | |||
| return; | |||
| } | |||
| /// <summary> | |||
| @@ -171,20 +192,5 @@ namespace Tensorflow | |||
| return HandleData.Parser.ParseFrom(handle.BufferToArray()); | |||
| } | |||
| } | |||
| /// <summary> | |||
| /// Represents a future for a read of a variable. | |||
| /// Pretends to be the tensor if anyone looks. | |||
| /// </summary> | |||
| public class _UnreadVariable : BaseResourceVariable | |||
| { | |||
| } | |||
| /// <summary> | |||
| /// A python variable from an existing handle. | |||
| /// </summary> | |||
| public class BaseResourceVariable : VariableV1 | |||
| { | |||
| } | |||
| } | |||
| } | |||
| @@ -6,7 +6,7 @@ | |||
| /// </summary> | |||
| public interface IProtoBuf<TProtoDef, TDef> | |||
| { | |||
| string name { get; } | |||
| string Name { get; } | |||
| /// <summary> | |||
| /// Converts a `Variable` to a `VariableDef` protocol buffer. | |||
| @@ -65,7 +65,8 @@ namespace Tensorflow | |||
| public virtual NDArray run(ITensorOrOperation fetche, params FeedItem[] feed_dict) | |||
| { | |||
| return _run(fetche, feed_dict)[0]; | |||
| var results = _run(fetche, feed_dict); | |||
| return fetche is Tensor ? results[0] : null; | |||
| } | |||
| public virtual (NDArray, NDArray, NDArray, NDArray, NDArray) run( | |||
| @@ -54,6 +54,11 @@ namespace Tensorflow | |||
| Handle = TF_NewStatus(); | |||
| } | |||
| public Status(SafeStatusHandle handle) | |||
| { | |||
| Handle = handle ?? throw new ArgumentNullException(nameof(handle)); | |||
| } | |||
| public void SetStatus(TF_Code code, string msg) | |||
| { | |||
| TF_SetStatus(Handle, code, msg); | |||
| @@ -0,0 +1,17 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow | |||
| { | |||
| public class GCItemCounter | |||
| { | |||
| public GCItemType ItemType { get; set; } | |||
| public int RefCounter { get; set; } | |||
| public DateTime LastUpdateTime { get; set; } | |||
| public IntPtr Handle { get; set; } | |||
| public override string ToString() | |||
| => $"{ItemType} {RefCounter} {LastUpdateTime}"; | |||
| } | |||
| } | |||
| @@ -0,0 +1,13 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow | |||
| { | |||
| public enum GCItemType | |||
| { | |||
| TensorHandle = 0, | |||
| LocalTensorHandle = 1, | |||
| EagerTensorHandle = 2 | |||
| } | |||
| } | |||
| @@ -0,0 +1,95 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Text; | |||
| using System.Threading; | |||
| using System.Threading.Tasks; | |||
| using System.Timers; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public class GarbageCollector | |||
| { | |||
| static Dictionary<IntPtr, GCItemCounter> container = new Dictionary<IntPtr, GCItemCounter>(); | |||
| static object locker = new object(); | |||
| public static void Init() | |||
| { | |||
| Task.Run(() => | |||
| { | |||
| while (true) | |||
| { | |||
| Thread.Sleep(100); | |||
| Recycle(); | |||
| } | |||
| }); | |||
| } | |||
| public static void Increase(IntPtr handle, GCItemType type) | |||
| { | |||
| if (handle == IntPtr.Zero) | |||
| return; | |||
| if (container.ContainsKey(handle)) | |||
| { | |||
| container[handle].RefCounter++; | |||
| container[handle].LastUpdateTime = DateTime.Now; | |||
| } | |||
| else | |||
| { | |||
| lock (locker) | |||
| { | |||
| container[handle] = new GCItemCounter | |||
| { | |||
| ItemType = type, | |||
| RefCounter = 1, | |||
| Handle = handle, | |||
| LastUpdateTime = DateTime.Now | |||
| }; | |||
| } | |||
| } | |||
| } | |||
| public static void Decrease(IntPtr handle) | |||
| { | |||
| if (handle != IntPtr.Zero && container.ContainsKey(handle)) | |||
| container[handle].RefCounter--; | |||
| } | |||
| private static void Recycle() | |||
| { | |||
| // dispose handles whose reference count dropped to zero more than 100ms ago | |||
| lock (locker) | |||
| { | |||
| var items = container.Values | |||
| .Where(x => x.RefCounter <= 0 && (DateTime.Now - x.LastUpdateTime).TotalMilliseconds > 100) | |||
| .ToArray(); | |||
| foreach (var item in items) | |||
| { | |||
| item.RefCounter = 0; | |||
| container.Remove(item.Handle); | |||
| switch (item.ItemType) | |||
| { | |||
| case GCItemType.TensorHandle: | |||
| // print($"c_api.TF_DeleteTensor({item.Handle.ToString("x16")})"); | |||
| c_api.TF_DeleteTensor(item.Handle); | |||
| break; | |||
| case GCItemType.LocalTensorHandle: | |||
| // print($"c_api.TFE_DeleteTensorHandle({item.Handle.ToString("x16")})"); | |||
| c_api.TFE_DeleteTensorHandle(item.Handle); | |||
| break; | |||
| case GCItemType.EagerTensorHandle: | |||
| // print($"c_api.TFE_DeleteEagerTensor({item.Handle.ToString("x16")})"); | |||
| c_api.TFE_DeleteEagerTensor(item.Handle); | |||
| break; | |||
| default: | |||
| break; | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
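`GarbageCollector` is a small reference-count-plus-timeout reaper for native handles: `Init` starts a background loop that wakes every 100 ms and frees any registered handle whose counter has dropped to zero and that has not been updated for more than 100 ms, calling the native delete function that matches its `GCItemType`. A lifecycle sketch, where `handle` is an illustrative native eager tensor handle owned by the caller:

```csharp
// Sketch of the intended lifecycle; "handle" is an illustrative IntPtr, not a field of the class.
GarbageCollector.Init();                                          // start the background recycle loop (once per process)
GarbageCollector.Increase(handle, GCItemType.EagerTensorHandle);  // register the handle / add a reference
// ... use the native handle ...
GarbageCollector.Decrease(handle);                                // count reaches 0; freed on a later sweep
```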
| @@ -4,8 +4,8 @@ | |||
| <TargetFramework>netstandard2.0</TargetFramework> | |||
| <AssemblyName>TensorFlow.NET</AssemblyName> | |||
| <RootNamespace>Tensorflow</RootNamespace> | |||
| <TargetTensorFlow>2.01.0</TargetTensorFlow> | |||
| <Version>0.20.0</Version> | |||
| <TargetTensorFlow>2.2.0</TargetTensorFlow> | |||
| <Version>0.20.0-alpha2</Version> | |||
| <LangVersion>8.0</LangVersion> | |||
| <Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors> | |||
| <Company>SciSharp STACK</Company> | |||
| @@ -15,17 +15,15 @@ | |||
| <RepositoryType>git</RepositoryType> | |||
| <PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl> | |||
| <PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl> | |||
| <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags> | |||
| <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#, TF.NET</PackageTags> | |||
| <Description>Google's TensorFlow full binding in .NET Standard. | |||
| Building, training and inferring deep learning models. | |||
| https://tensorflownet.readthedocs.io</Description> | |||
| <AssemblyVersion>0.20.0.0</AssemblyVersion> | |||
| <PackageReleaseNotes>Changes since v0.15.0: | |||
| 1: Add TransformGraphWithStringInputs. | |||
| 2: tf.trainer.load_graph, tf.trainer.freeze_graph | |||
| 3: Import Protobuf.Text | |||
| 4: Support YOLOv3 object detection | |||
| 5: Add implicitation for Operation to RefVariable</PackageReleaseNotes> | |||
| <PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x. | |||
| Eager Mode has finally been added. | |||
| It is not yet stable and many APIs are still missing; tf.net 0.15.x remains more stable for production. | |||
| Please be patient, we're working hard on the missing functions; providing a full tensorflow binding is our mission.</PackageReleaseNotes> | |||
| <FileVersion>0.20.0.0</FileVersion> | |||
| <PackageLicenseFile>LICENSE</PackageLicenseFile> | |||
| <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | |||
| @@ -37,12 +35,12 @@ https://tensorflownet.readthedocs.io</Description> | |||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | |||
| <AllowUnsafeBlocks>true</AllowUnsafeBlocks> | |||
| <DefineConstants>TRACE;DEBUG</DefineConstants> | |||
| <PlatformTarget>x64</PlatformTarget> | |||
| <PlatformTarget>AnyCPU</PlatformTarget> | |||
| </PropertyGroup> | |||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> | |||
| <AllowUnsafeBlocks>true</AllowUnsafeBlocks> | |||
| <DefineConstants>TRACE;DEBUG;SERIALIZABLE_</DefineConstants> | |||
| <DefineConstants>TRACE;DEBUG</DefineConstants> | |||
| <PlatformTarget>x64</PlatformTarget> | |||
| </PropertyGroup> | |||
| @@ -64,6 +62,7 @@ https://tensorflownet.readthedocs.io</Description> | |||
| <None Remove="Distribute\**" /> | |||
| <None Remove="Models\**" /> | |||
| <None Remove="runtimes\**" /> | |||
| <Compile Remove="Util\BindingArray2.cs" /> | |||
| <None Include="..\..\LICENSE"> | |||
| <Pack>True</Pack> | |||
| <PackagePath></PackagePath> | |||
| @@ -0,0 +1,79 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Linq; | |||
| using System.Reflection; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public class EagerTensorV2 : DisposableObject, ITensor | |||
| { | |||
| IntPtr tfe_tensor_handle; | |||
| public IntPtr EagerTensorHandle { get; set; } | |||
| public string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(tfe_tensor_handle, status.Handle)); | |||
| static Status status = new Status(); | |||
| public EagerTensorV2(IntPtr handle) | |||
| { | |||
| EagerTensorHandle = handle; | |||
| tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | |||
| _handle = c_api.TFE_TensorHandleResolve(tfe_tensor_handle, status.Handle); | |||
| } | |||
| public unsafe EagerTensorV2(NDArray nd, string device_name = "") | |||
| { | |||
| if (nd.typecode == NPTypeCode.String) | |||
| throw new NotImplementedException("Support for NDArray of type string not implemented yet"); | |||
| var arraySlice = nd.Unsafe.Storage.Shape.IsContiguous ? nd.GetData() : nd.CloneData(); | |||
| _handle = c_api.TF_NewTensor(nd.dtype.as_dtype(), | |||
| nd.shape.Select(i => (long)i).ToArray(), | |||
| nd.ndim, | |||
| new IntPtr(arraySlice.Address), | |||
| nd.size * nd.dtypesize, | |||
| deallocator: (IntPtr dataPtr, long len, IntPtr args) => | |||
| { | |||
| }, IntPtr.Zero); | |||
| tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle); | |||
| EagerTensorHandle = c_api.TFE_NewEagerTensor(); | |||
| } | |||
| /*public unsafe EagerTensorV2(float[,] value) | |||
| { | |||
| var dims = new long[] { value.Rank, value.Length / value.Rank }; | |||
| fixed (float* pointer = &value[0, 0]) | |||
| { | |||
| // The address stored in pointerToFirst | |||
| // is valid only inside this fixed statement block. | |||
| tensorHandle = c_api.TF_NewTensor(TF_DataType.TF_FLOAT, | |||
| dims, | |||
| value.Rank, | |||
| new IntPtr(pointer), | |||
| value.Length * sizeof(float), | |||
| deallocator: (IntPtr dataPtr, long len, IntPtr args) => | |||
| { | |||
| }, IntPtr.Zero); | |||
| localTensorHandle = c_api.TFE_NewTensorHandle(tensorHandle, status); | |||
| _handle = c_api.TFE_EagerTensorFromHandle(tf.context, localTensorHandle); | |||
| } | |||
| }*/ | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| c_api.TF_DeleteTensor(_handle); | |||
| c_api.TFE_DeleteTensorHandle(tfe_tensor_handle); | |||
| c_api.TFE_DeleteEagerTensor(EagerTensorHandle); | |||
| } | |||
| } | |||
| } | |||
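`EagerTensorV2` wraps managed `NDArray` data as a native `TF_Tensor` plus the corresponding eager handles, and `DisposeUnmanagedResources` releases all three. A construction sketch, assuming a non-string, contiguous NumSharp array:

```csharp
using System;
using NumSharp;
using Tensorflow;

// Sketch: build an eager tensor from managed data; disposing it deletes the
// TF_Tensor, the TFE_TensorHandle and the eager tensor handle it created.
using (var t = new EagerTensorV2(np.array(new float[] { 1f, 2f, 3f })))
{
    Console.WriteLine(t.Device);  // device name resolved through the native tensor handle
}
```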
| @@ -0,0 +1,11 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| namespace Tensorflow | |||
| { | |||
| public interface ITensor | |||
| { | |||
| } | |||
| } | |||
| @@ -0,0 +1,31 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| namespace Tensorflow | |||
| { | |||
| [StructLayout(LayoutKind.Sequential)] | |||
| public struct TF_BindingArray | |||
| { | |||
| public IntPtr array; | |||
| public int length; | |||
| public static implicit operator TF_BindingArray(IntPtr handle) | |||
| => handle == IntPtr.Zero ? default : Marshal.PtrToStructure<TF_BindingArray>(handle); | |||
| public unsafe IntPtr this[int index] | |||
| => array == IntPtr.Zero ? IntPtr.Zero : *((IntPtr*)array + index); | |||
| public unsafe IntPtr[] Data | |||
| { | |||
| get | |||
| { | |||
| var results = new IntPtr[length]; | |||
| for (int i = 0; i < length; i++) | |||
| results[i] = array == IntPtr.Zero ? IntPtr.Zero : *((IntPtr*)array + i); | |||
| return results; | |||
| } | |||
| } | |||
| } | |||
| } | |||
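`TF_BindingArray` mirrors a native (pointer, length) pair and can be materialized straight from an `IntPtr` via the implicit conversion. A reading sketch, where `nativePtr` is a placeholder for a pointer returned by some binding call:

```csharp
// Sketch: marshal a native (array, length) struct and copy out its handles.
// "nativePtr" is an illustrative IntPtr obtained from the C API, not a real field.
TF_BindingArray binding = nativePtr;   // implicit Marshal.PtrToStructure
IntPtr first = binding[0];             // unsafe indexer: *((IntPtr*)array + 0)
IntPtr[] all = binding.Data;           // copies all 'length' entries into a managed array
```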
| @@ -17,6 +17,6 @@ namespace Tensorflow | |||
| => tensor._handle; | |||
| public override string ToString() | |||
| => $"TF_Tensor {_handle}"; | |||
| => $"TF_Tensor 0x{_handle.ToString("x16")}"; | |||
| } | |||
| } | |||
| @@ -23,6 +23,7 @@ using System.Runtime.CompilerServices; | |||
| using System.Runtime.InteropServices; | |||
| using System.Text; | |||
| using static Tensorflow.c_api; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -59,6 +60,14 @@ namespace Tensorflow | |||
| //no need to set AllocationType = AllocationType.None; | |||
| } | |||
| public Tensor(int value) | |||
| { | |||
| unsafe | |||
| { | |||
| _handle = TF_NewTensor(tf.int32, dims: null, num_dims: 0, data: null, len: sizeof(int)); | |||
| } | |||
| } | |||
| /// <summary> | |||
| /// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller) | |||
| /// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor | |||
| @@ -54,7 +54,7 @@ namespace Tensorflow | |||
| #else | |||
| #region Compute | |||
| public static Tensor operator +(Tensor lhs, ResourceVariable rhs) => BinaryOpWrapper("add", lhs, rhs); | |||
| public static Tensor operator +(Tensor lhs, Tensor rhs) => BinaryOpWrapper("add", lhs, rhs); | |||
| public static Tensor operator +(Tensor lhs, NDArray rhs) => BinaryOpWrapper("add", lhs, rhs); | |||
| public static Tensor operator +(NDArray lhs, Tensor rhs) => BinaryOpWrapper("add", lhs, rhs); | |||
| @@ -1,4 +1,5 @@ | |||
| using NumSharp; | |||
| using NumSharp.Backends; | |||
| using NumSharp.Backends.Unmanaged; | |||
| using NumSharp.Utilities; | |||
| using System; | |||
| @@ -43,7 +44,7 @@ namespace Tensorflow | |||
| { | |||
| //T can only be unmanaged, I believe it is safe to say that MemoryCopy is valid for all cases this method can be called. | |||
| var src = (T*)buffer; | |||
| len *= ((long)itemsize); | |||
| len *= (long)itemsize; | |||
| System.Buffer.MemoryCopy(src, dst, len, len); | |||
| } | |||
| } | |||
| @@ -150,26 +151,40 @@ namespace Tensorflow | |||
| /// Tensor has rank 0. | |||
| /// </returns> | |||
| public NDArray numpy() | |||
| => NDims == 0 ? GetScalar(dtype) : GetNDArray(dtype); | |||
| => GetNDArray(dtype); | |||
| protected unsafe NDArray GetNDArray(TF_DataType dtype) | |||
| { | |||
| UnmanagedStorage storage; | |||
| switch (dtype) | |||
| { | |||
| case TF_DataType.TF_STRING: | |||
| return StringData(); | |||
| return (NDArray)StringData()[0]; | |||
| case TF_DataType.TF_INT32: | |||
| return ToArray<int>(); | |||
| storage = new UnmanagedStorage(NPTypeCode.Int32); | |||
| break; | |||
| case TF_DataType.TF_INT64: | |||
| storage = new UnmanagedStorage(NPTypeCode.Int64); | |||
| break; | |||
| case TF_DataType.TF_FLOAT: | |||
| return ToArray<float>(); | |||
| storage = new UnmanagedStorage(NPTypeCode.Float); | |||
| break; | |||
| case TF_DataType.TF_DOUBLE: | |||
| return ToArray<double>(); | |||
| storage = new UnmanagedStorage(NPTypeCode.Double); | |||
| break; | |||
| default: | |||
| return BufferToArray(); | |||
| } | |||
| storage.Allocate(new Shape(shape)); | |||
| var bytesize = (long)this.bytesize; | |||
| System.Buffer.MemoryCopy(buffer.ToPointer(), storage.Address, bytesize, bytesize); | |||
| return new NDArray(storage); | |||
| } | |||
| protected unsafe NDArray GetScalar(TF_DataType dtype) | |||
| /*protected unsafe NDArray GetScalar(TF_DataType dtype) | |||
| { | |||
| switch(dtype) | |||
| { | |||
| @@ -184,7 +199,7 @@ namespace Tensorflow | |||
| default: | |||
| return BufferToArray(); | |||
| } | |||
| } | |||
| }*/ | |||
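With this change numpy() no longer special-cases rank-0 tensors: GetNDArray allocates UnmanagedStorage with the NPTypeCode matching the tensor dtype, copies the raw tensor buffer with MemoryCopy, and wraps the storage in an NDArray, so the result owns its own memory. A usage sketch, assuming t is an existing float32 Tensor:

    NDArray nd = t.numpy();   // copies the TF_Tensor buffer into NumSharp-managed storage
    // nd is an independent copy: mutating it does not write back into the tensor buffer,
    // and string tensors come back as the first decoded string (StringData()[0]).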
| /// <summary> | |||
| /// Copies the memory of current buffer onto newly allocated array. | |||
| @@ -32,14 +32,15 @@ namespace Tensorflow | |||
| /// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes. | |||
| /// </summary> | |||
| [SuppressMessage("ReSharper", "ConvertToAutoProperty")] | |||
| public partial class Tensor : DisposableObject, | |||
| public partial class Tensor : DisposableObject, | |||
| ITensor, | |||
| ITensorOrOperation, | |||
| _TensorLike, | |||
| ITensorOrTensorArray, | |||
| IPackable<Tensor>, | |||
| ICanBeFlattened | |||
| { | |||
| private readonly int _id; | |||
| protected int _id; | |||
| private readonly Operation _op; | |||
| private readonly int _value_index; | |||
| private TF_Output? _tf_output; | |||
| @@ -82,7 +83,7 @@ namespace Tensorflow | |||
| /// <summary> | |||
| /// The name of the device on which this tensor will be produced, or null. | |||
| /// </summary> | |||
| public string Device => op.Device; | |||
| public virtual string Device => op.Device; | |||
| public int[] dims => shape; | |||
| /// <summary> | |||
| @@ -170,7 +171,7 @@ namespace Tensorflow | |||
| /// n n-Tensor (you get the idea) | |||
| /// </summary> | |||
| /// <remarks>https://www.tensorflow.org/api_docs/python/tf/rank</remarks> | |||
| public int rank | |||
| public virtual int rank | |||
| { | |||
| get | |||
| { | |||
| @@ -78,6 +78,9 @@ namespace Tensorflow | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, UIntPtr len, Deallocator deallocator, ref DeallocatorArgs deallocator_arg); | |||
| [DllImport(TensorFlowLibName)] | |||
| public static extern TF_Tensor TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, long len, DeallocatorV2 deallocator, IntPtr args); | |||
| /// <summary> | |||
| /// Return a new tensor that holds the bytes data[0,len-1] | |||
| /// </summary> | |||
| @@ -101,18 +101,37 @@ namespace Tensorflow | |||
| return op.outputs[0]; | |||
| } | |||
| private static Tensor _eager_fill(int[] dims, Tensor value, Context ctx) | |||
| private static Tensor _eager_fill(int[] dims, EagerTensor value, Context ctx) | |||
| { | |||
| var attr_t = value.dtype.as_datatype_enum(); | |||
| var dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32); | |||
| var inputs_flat = new[] { dims_t, value }; | |||
| var attrs = new object[] { "T", attr_t, "index_type", TF_DataType.TF_INT32 }; | |||
| var result = _execute.execute(ctx, "Fill", inputs_flat, attrs); | |||
| return result; | |||
| var result = _execute.execute(ctx, "Fill", 1, inputs_flat, attrs); | |||
| return result[0]; | |||
| } | |||
| private static EagerTensor convert_to_eager_tensor(object value, Context ctx, TF_DataType dtype = TF_DataType.DtInvalid) | |||
| { | |||
| // convert data type | |||
| if (dtype != TF_DataType.DtInvalid && | |||
| value.GetType().Name != "NDArray" && | |||
| value.GetType().BaseType.Name != "Array" && | |||
| dtypes.as_base_dtype(dtype) != dtypes.as_dtype(value.GetType())) | |||
| { | |||
| switch (dtype) | |||
| { | |||
| case TF_DataType.TF_FLOAT: | |||
| value = Convert.ToSingle(value); | |||
| break; | |||
| case TF_DataType.TF_INT64: | |||
| value = Convert.ToInt64(value); | |||
| break; | |||
| default: | |||
| break; | |||
| } | |||
| } | |||
| switch (value) | |||
| { | |||
| case NDArray val: | |||
| @@ -125,8 +144,12 @@ namespace Tensorflow | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case int[,] val: | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case long val: | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case float val: | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case float[,] val: | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case double val: | |||
| return new EagerTensor(val, ctx.device_name); | |||
| case float[] val: | |||
| @@ -202,6 +202,7 @@ namespace Tensorflow | |||
| TF_DataType.TF_INT32 => "int32", | |||
| TF_DataType.TF_FLOAT => "float32", | |||
| TF_DataType.TF_BOOL => "bool", | |||
| TF_DataType.TF_RESOURCE => "resource", | |||
| _ => type.ToString() | |||
| }; | |||
| @@ -19,6 +19,7 @@ using System; | |||
| using System.Linq; | |||
| using NumSharp.Utilities; | |||
| using System.Text; | |||
| using Tensorflow.Eager; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -39,6 +40,9 @@ namespace Tensorflow | |||
| /// <returns></returns> | |||
| public static NDArray constant_value(Tensor tensor, bool partial = false) | |||
| { | |||
| if (tensor is EagerTensor) | |||
| return tensor.numpy(); | |||
| NDArray ret = _ConstantValue(tensor, partial); | |||
| if (!(ret is null)) | |||
| tensor.graph.prevent_feeding(tensor); | |||
| @@ -15,6 +15,7 @@ | |||
| ******************************************************************************/ | |||
| using NumSharp; | |||
| using Tensorflow.Eager; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -111,7 +111,7 @@ namespace Tensorflow.Train | |||
| protected override void _create_slots(RefVariable[] var_list) | |||
| { | |||
| var first_var = var_list.OrderBy(x => x.name).First(); | |||
| var first_var = var_list.OrderBy(x => x.Name).First(); | |||
| _create_non_slot_variable(initial_value: _beta1, name: "beta1_power", colocate_with: first_var); | |||
| _create_non_slot_variable(initial_value: _beta2, name: "beta2_power", colocate_with: first_var); | |||
| @@ -44,7 +44,7 @@ namespace Tensorflow | |||
| public Tensor LearningRateTensor => _lr_t; | |||
| public bool _use_locking; | |||
| public Dictionary<string, Dictionary<string, RefVariable>> _slots; | |||
| public Dictionary<string, VariableV1> _non_slot_dict; | |||
| public Dictionary<string, IVariableV1> _non_slot_dict; | |||
| public Dictionary<string, object> _deferred_slot_restorations; | |||
| SlotCreator slot_creator = new SlotCreator(); | |||
| @@ -58,7 +58,7 @@ namespace Tensorflow | |||
| _lr = learning_rate; | |||
| // Dictionary of slots. | |||
| _slots = new Dictionary<string, Dictionary<string, RefVariable>>(); | |||
| _non_slot_dict = new Dictionary<string, VariableV1>(); | |||
| _non_slot_dict = new Dictionary<string, IVariableV1>(); | |||
| _deferred_slot_restorations = new Dictionary<string, object>(); | |||
| } | |||
| @@ -72,7 +72,7 @@ namespace Tensorflow | |||
| _lr_t = learning_rate; | |||
| // Dictionary of slots. | |||
| _slots = new Dictionary<string, Dictionary<string, RefVariable>>(); | |||
| _non_slot_dict = new Dictionary<string, VariableV1>(); | |||
| _non_slot_dict = new Dictionary<string, IVariableV1>(); | |||
| _deferred_slot_restorations = new Dictionary<string, object>(); | |||
| } | |||
| @@ -122,7 +122,7 @@ namespace Tensorflow | |||
| var vars_with_grad = grads_and_vars.Where(x => x.Item1 != null).Select(x => x.Item2).ToArray(); | |||
| if (vars_with_grad.Length == 0) | |||
| throw new ValueError($"No gradients provided for any variable, check your graph for ops" + | |||
| $" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.name))} and loss {loss}."); | |||
| $" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.Name))} and loss {loss}."); | |||
| return apply_gradients(grads_and_vars, global_step:global_step, name:name); | |||
| } | |||
| @@ -175,7 +175,7 @@ namespace Tensorflow | |||
| if (grad == null) | |||
| continue; | |||
| var scope_name = var.op.name; | |||
| var scope_name = var.Op.name; | |||
| tf_with(ops.name_scope("update_" + scope_name), scope2 => | |||
| { | |||
| var op = processor.update_op(this, grad); | |||
| @@ -241,10 +241,10 @@ namespace Tensorflow | |||
| /// <param name="initial_value"></param> | |||
| /// <param name="name"></param> | |||
| /// <param name="colocate_with"></param> | |||
| protected VariableV1 _create_non_slot_variable(float initial_value, string name, RefVariable colocate_with) | |||
| protected IVariableV1 _create_non_slot_variable(float initial_value, string name, RefVariable colocate_with) | |||
| { | |||
| // Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables. | |||
| var graph = colocate_with.graph; | |||
| var graph = colocate_with.Graph; | |||
| var key = $"{name}.{graph.graph_key}"; | |||
| var v = _non_slot_dict.ContainsKey(key) ? _non_slot_dict[key] : null; | |||
| if(v == null) | |||
| @@ -333,10 +333,10 @@ namespace Tensorflow | |||
| private string _var_key(RefVariable var) | |||
| { | |||
| return $"{var.op.graph.graph_key}.{var.op.name}"; | |||
| return $"{var.Op.graph.graph_key}.{var.Op.name}"; | |||
| } | |||
| protected VariableV1 _get_non_slot_variable(string name, Graph graph = null) | |||
| protected IVariableV1 _get_non_slot_variable(string name, Graph graph = null) | |||
| { | |||
| var key = $"{name}.{graph.graph_key}"; | |||
| var non_slot = _non_slot_dict.ContainsKey(key) ? _non_slot_dict[key] : null; | |||
| @@ -385,7 +385,7 @@ namespace Tensorflow | |||
| case List<RefVariable> values: | |||
| var_list = values.Concat(vars).ToList(); | |||
| break; | |||
| case List<VariableV1> values: | |||
| case List<IVariableV1> values: | |||
| var_list = values.Select(x => x as RefVariable).Concat(vars).ToList(); | |||
| break; | |||
| } | |||
| @@ -79,7 +79,7 @@ namespace Tensorflow | |||
| return gen_io_ops.restore_v2(filename_tensor, names.ToArray(), slices.ToArray(), dtypes.ToArray()); | |||
| } | |||
| public virtual SaverDef _build_internal(VariableV1[] names_to_saveables, | |||
| public virtual SaverDef _build_internal(IVariableV1[] names_to_saveables, | |||
| bool reshape = false, | |||
| bool sharded = false, | |||
| int max_to_keep = 5, | |||
| @@ -22,7 +22,7 @@ namespace Tensorflow | |||
| Tensor[] bulk_restore(Tensor filename_tensor, SaveableObject[] saveables, int preferred_shard, bool restore_sequentially); | |||
| SaverDef _build_internal(VariableV1[] names_to_saveables, | |||
| SaverDef _build_internal(IVariableV1[] names_to_saveables, | |||
| bool reshape = false, | |||
| bool sharded = false, | |||
| int max_to_keep = 5, | |||
| @@ -29,7 +29,7 @@ namespace Tensorflow | |||
| /// </summary> | |||
| public class Saver | |||
| { | |||
| private VariableV1[] _var_list; | |||
| private IVariableV1[] _var_list; | |||
| private bool _reshape; | |||
| private bool _sharded; | |||
| private int _max_to_keep; | |||
| @@ -50,7 +50,7 @@ namespace Tensorflow | |||
| private Dictionary<string, float> _last_checkpoints; | |||
| private Dictionary<string, float> _checkpoints_to_be_deleted; | |||
| public Saver(VariableV1[] var_list = null, | |||
| public Saver(IVariableV1[] var_list = null, | |||
| bool reshape = false, | |||
| bool sharded = false, | |||
| int max_to_keep = 5, | |||
| @@ -28,7 +28,7 @@ namespace Tensorflow | |||
| /// </summary> | |||
| /// <param name="names_to_saveables"></param> | |||
| /// <returns></returns> | |||
| public static SaveableObject[] validate_and_slice_inputs(VariableV1[] names_to_saveables) | |||
| public static SaveableObject[] validate_and_slice_inputs(IVariableV1[] names_to_saveables) | |||
| { | |||
| var names_to_saveables_dict = op_list_to_dict(names_to_saveables); | |||
| var saveables = new List<SaveableObject>(); | |||
| @@ -76,9 +76,9 @@ namespace Tensorflow | |||
| } | |||
| } | |||
| public static Dictionary<string, Tensor> op_list_to_dict(VariableV1[] op_list, bool convert_variable_to_tensor = true) | |||
| public static Dictionary<string, Tensor> op_list_to_dict(IVariableV1[] op_list, bool convert_variable_to_tensor = true) | |||
| { | |||
| op_list = op_list.OrderBy(x => x.name).ToArray(); | |||
| op_list = op_list.OrderBy(x => x.Name).ToArray(); | |||
| var names_to_saveables = new Dictionary<string, Tensor>(); | |||
| foreach(var var in op_list) | |||
| @@ -103,7 +103,7 @@ namespace Tensorflow | |||
| if (convert_variable_to_tensor) | |||
| { | |||
| if (var is ResourceVariable) | |||
| tensor = var.graph_element; | |||
| tensor = var.GraphElement; | |||
| else | |||
| tensor = ops.internal_convert_to_tensor(var, as_ref: true); | |||
| } | |||
| @@ -111,7 +111,7 @@ namespace Tensorflow | |||
| if (tensor.op.type == "ReadVariableOp") | |||
| name = tensor.op.inputs[0].op.name; | |||
| else | |||
| name = var.op.name; | |||
| name = var.Op.name; | |||
| if (names_to_saveables.ContainsKey(name)) | |||
| throw new ValueError($"At least two variables have the same name: {name}"); | |||
| @@ -53,7 +53,7 @@ namespace Tensorflow | |||
| /// <returns></returns> | |||
| public static Saver _create_saver_from_imported_meta_graph(MetaGraphDef meta_graph_def, | |||
| string import_scope, | |||
| Dictionary<string, VariableV1> imported_vars) | |||
| Dictionary<string, IVariableV1> imported_vars) | |||
| { | |||
| if(meta_graph_def.SaverDef != null) | |||
| { | |||
| @@ -64,7 +64,7 @@ namespace Tensorflow | |||
| { | |||
| var sample_key = var_names[0]; | |||
| var sample_var = imported_vars[sample_key]; | |||
| scope = string.Join("", sample_var.name.Skip(sample_key.Length)); | |||
| scope = string.Join("", sample_var.Name.Skip(sample_key.Length)); | |||
| } | |||
| return new Saver(saver_def: meta_graph_def.SaverDef, name: scope); | |||
| } | |||
| @@ -33,7 +33,7 @@ namespace Tensorflow.Train | |||
| public RefVariable create_slot(RefVariable primary, Tensor val, string name, bool colocate_with_primary = true) | |||
| { | |||
| var validate_shape = val.TensorShape.is_fully_defined(); | |||
| var prefix = primary.op.name; | |||
| var prefix = primary.Op.name; | |||
| return tf_with(tf.variable_scope(name: null, prefix + "/" + name), delegate | |||
| { | |||
| return _create_slot_var(primary, val, "", validate_shape, null, TF_DataType.DtInvalid); | |||
| @@ -74,7 +74,7 @@ namespace Tensorflow.Train | |||
| TF_DataType dtype, string name, bool colocate_with_primary = true) | |||
| { | |||
| var validate_shape = shape.is_fully_defined(); | |||
| var prefix = primary.op.name; | |||
| var prefix = primary.Op.name; | |||
| return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate | |||
| { | |||
| return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype); | |||
| @@ -91,7 +91,7 @@ namespace Tensorflow.Train | |||
| /// <param name="shape"></param> | |||
| /// <param name="dtype"></param> | |||
| /// <returns></returns> | |||
| private RefVariable _create_slot_var(VariableV1 primary, object val, string scope, bool validate_shape, | |||
| private RefVariable _create_slot_var(IVariableV1 primary, object val, string scope, bool validate_shape, | |||
| TensorShape shape, TF_DataType dtype) | |||
| { | |||
| bool use_resource = primary is ResourceVariable; | |||
| @@ -15,6 +15,7 @@ | |||
| ******************************************************************************/ | |||
| using System; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Train | |||
| { | |||
| @@ -26,16 +27,26 @@ namespace Tensorflow.Train | |||
| /// Restore-on-create for a variable to be saved with this `Checkpointable`. | |||
| /// </summary> | |||
| /// <returns></returns> | |||
| protected virtual VariableV1 _add_variable_with_custom_getter(string name, | |||
| protected virtual IVariableV1 _add_variable_with_custom_getter(string name, | |||
| int[] shape, | |||
| TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
| IInitializer initializer = null, | |||
| Func<string, int[], TF_DataType, IInitializer, bool, VariableV1> getter = null, | |||
| Func<string, int[], TF_DataType, IInitializer, bool, IVariableV1> getter = null, | |||
| bool overwrite = false, | |||
| bool trainable = false) | |||
| bool trainable = false, | |||
| bool use_resource = false, | |||
| VariableSynchronization synchronization = VariableSynchronization.Auto, | |||
| VariableAggregation aggregation = VariableAggregation.None) | |||
| { | |||
| var checkpoint_initializer = true; | |||
| var new_variable = getter(name, shape, dtype, initializer, trainable); | |||
| ops.init_scope(); | |||
| IInitializer checkpoint_initializer = null; | |||
| if (tf.context.executing_eagerly()) | |||
| ; | |||
| else | |||
| checkpoint_initializer = null; | |||
| IVariableV1 new_variable; | |||
| new_variable = getter(name, shape, dtype, initializer, trainable); | |||
| // If we set an initializer and the variable processed it, tracking will not | |||
| // assign again. It will add this variable to our dependencies, and if there | |||
| @@ -53,13 +64,13 @@ namespace Tensorflow.Train | |||
| /// </summary> | |||
| /// <param name="name"></param> | |||
| /// <param name="trackable"></param> | |||
| protected void _handle_deferred_dependencies(string name, VariableV1 trackable) | |||
| protected void _handle_deferred_dependencies(string name, IVariableV1 trackable) | |||
| { | |||
| _maybe_initialize_trackable(); | |||
| // TODO | |||
| } | |||
| protected VariableV1 _track_checkpointable(VariableV1 checkpointable, string name, bool overwrite = false) | |||
| protected IVariableV1 _track_checkpointable(IVariableV1 checkpointable, string name, bool overwrite = false) | |||
| { | |||
| return checkpointable; | |||
| } | |||
| @@ -62,7 +62,7 @@ namespace Tensorflow.Train | |||
| var g = graph.as_default(); | |||
| g.name_scope(null); | |||
| g.name_scope(global_step_tensor.op.name + "/"); | |||
| g.name_scope(global_step_tensor.Op.name + "/"); | |||
| // Use initialized_value to ensure that global_step is initialized before this run. | |||
| // This is needed because, for example, Estimator builds every model_fn under a | |||
| // global_step_read_tensor dependency. | |||
| @@ -14,6 +14,11 @@ | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Linq; | |||
| using Tensorflow.Eager; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| public class gen_training_ops | |||
| @@ -55,5 +60,33 @@ namespace Tensorflow | |||
| return _op.outputs[0]; | |||
| } | |||
| public static Operation resource_apply_gradient_descent(EagerTensor var, EagerTensor alpha, EagerTensor delta, bool use_locking = false, string name = null) | |||
| { | |||
| if (tf.context.executing_eagerly()) | |||
| { | |||
| using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||
| "ResourceApplyGradientDescent", name, new IntPtr[] | |||
| { | |||
| var, | |||
| alpha, | |||
| delta | |||
| }, 3, | |||
| op => wrap_tfe_src.SetOpAttrs(op, "use_locking", use_locking), | |||
| null, 0)); | |||
| status.Check(true); | |||
| return null; | |||
| } | |||
| var _op = _op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name, new | |||
| { | |||
| var, | |||
| alpha, | |||
| delta, | |||
| use_locking | |||
| }); | |||
| return _op.outputs[0]; | |||
| } | |||
| } | |||
| } | |||
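The new resource_apply_gradient_descent has two paths: under eager execution it goes straight through TFE_FastPathExecute (and returns null, since the update runs immediately), otherwise it falls back to _apply_op_helper and returns the op's first output. An illustrative call, assuming handle, lr, and grad are EagerTensors already placed on the current device:

    var op = gen_training_ops.resource_apply_gradient_descent(handle, lr, grad, use_locking: false);
    // op == null in eager mode; in graph mode it is the ResourceApplyGradientDescent output.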
| @@ -0,0 +1,50 @@ | |||
| /***************************************************************************** | |||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Runtime.InteropServices; | |||
| namespace Tensorflow | |||
| { | |||
| public class BindingArray : DisposableObject | |||
| { | |||
| TF_BindingArray data; | |||
| public IntPtr Address => data.array; | |||
| public int Length => data.length; | |||
| public BindingArray(IntPtr handle) : base(handle) | |||
| { | |||
| if (_handle != IntPtr.Zero) | |||
| data = Marshal.PtrToStructure<TF_BindingArray>(_handle); | |||
| else | |||
| data = default; | |||
| } | |||
| public static implicit operator BindingArray(IntPtr handle) | |||
| => new BindingArray(handle); | |||
| public unsafe IntPtr this[int index] | |||
| => data[index]; | |||
| public unsafe IntPtr[] Data | |||
| => data.Data; | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| c_api.TF_DeleteBindingArray(_handle); | |||
| } | |||
| } | |||
| } | |||
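BindingArray (and the BindingTensorArray below, which differs only in the deallocator it calls) wraps the same TF_BindingArray layout in a DisposableObject so the native allocation is released deterministically through c_api.TF_DeleteBindingArray. A sketch of the intended ownership pattern, again with a hypothetical producer:

    // Hypothetical native call returning a TF_BindingArray* handle.
    IntPtr handle = c_api_stub.GetBindingArrayHandle();

    using (BindingArray array = handle)   // implicit conversion takes ownership of the handle
    {
        IntPtr[] pointers = array.Data;   // managed copy of the native pointer table
    }                                     // Dispose -> c_api.TF_DeleteBindingArray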
| @@ -0,0 +1,50 @@ | |||
| /***************************************************************************** | |||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ******************************************************************************/ | |||
| using System; | |||
| using System.Runtime.InteropServices; | |||
| namespace Tensorflow | |||
| { | |||
| public class BindingTensorArray : DisposableObject | |||
| { | |||
| TF_BindingArray data; | |||
| public IntPtr Address => data.array; | |||
| public int Length => data.length; | |||
| public BindingTensorArray(IntPtr handle) : base(handle) | |||
| { | |||
| if (_handle != IntPtr.Zero) | |||
| data = Marshal.PtrToStructure<TF_BindingArray>(_handle); | |||
| else | |||
| data = default; | |||
| } | |||
| public static implicit operator BindingTensorArray(IntPtr handle) | |||
| => new BindingTensorArray(handle); | |||
| public unsafe IntPtr this[int index] | |||
| => data[index]; | |||
| public unsafe IntPtr[] Data | |||
| => data.Data; | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| c_api.TFE_DeleteBindingTensorArray(_handle); | |||
| } | |||
| } | |||
| } | |||