
Merge remote-tracking branch 'SciSharp/master' into safe-status-handle

tags/v0.20
Sam Harwell, 5 years ago · commit d123f44322
100 changed files with 2703 additions and 764 deletions
  1. +2 -2  README.md
  2. +108 -38  TensorFlow.NET.sln
  3. BIN  docs/assets/tf2.jpg
  4. BIN  docs/assets/tf2.psd
  5. +1 -1  docs/source/HelloWorld.md
  6. +51 -0  src/TensorFlowNET.Console/MemoryMonitor.cs
  7. +55 -0  src/TensorFlowNET.Console/MemoryTestingCases.cs
  8. +33 -0  src/TensorFlowNET.Console/Program.cs
  9. +18 -0  src/TensorFlowNET.Console/TensorFlowNET.Console.csproj
  10. +2 -2  src/TensorFlowNET.Core/APIs/c_api.cs
  11. +2 -2  src/TensorFlowNET.Core/APIs/tf.gradients.cs
  12. +7 -2  src/TensorFlowNET.Core/APIs/tf.math.cs
  13. +4 -2  src/TensorFlowNET.Core/APIs/tf.nn.cs
  14. +27 -16  src/TensorFlowNET.Core/APIs/tf.random.cs
  15. +4 -4  src/TensorFlowNET.Core/APIs/tf.train.cs
  16. +5 -5  src/TensorFlowNET.Core/APIs/tf.variable.cs
  17. +11 -0  src/TensorFlowNET.Core/Binding.Util.cs
  18. +52 -0  src/TensorFlowNET.Core/Eager/EagerOperation.cs
  19. +73 -0  src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
  20. +2 -2  src/TensorFlowNET.Core/Eager/EagerTensor.Implicit.cs
  21. +25 -29  src/TensorFlowNET.Core/Eager/EagerTensor.cs
  22. +17 -14  src/TensorFlowNET.Core/Eager/Execute.cs
  23. +2 -6  src/TensorFlowNET.Core/Eager/TFE_TensorHandle.cs
  24. +139 -5  src/TensorFlowNET.Core/Eager/c_api.eager.cs
  25. +0 -33  src/TensorFlowNET.Core/Eager/wrap_tfe_src.RecordGradient.cs
  26. +0 -62  src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_Execute.cs
  27. +11 -170  src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_FastPathExecute.cs
  28. +11 -11  src/TensorFlowNET.Core/Framework/meta_graph.cs
  29. +0 -63  src/TensorFlowNET.Core/Gradients/GradientActor.cs
  30. +99 -2  src/TensorFlowNET.Core/Gradients/GradientTape.cs
  31. +30 -0  src/TensorFlowNET.Core/Gradients/RegisterGradientEager.cs
  32. +43 -2  src/TensorFlowNET.Core/Gradients/Tape.cs
  33. +1 -1  src/TensorFlowNET.Core/Gradients/control_flow_grad.cs
  34. +100 -16  src/TensorFlowNET.Core/Gradients/math_grad.cs
  35. +74 -0  src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
  36. +2 -2  src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs
  37. +101 -0  src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping_eager.cs
  38. +32 -0  src/TensorFlowNET.Core/Gradients/resource_variable_grad.cs
  39. +1 -1  src/TensorFlowNET.Core/Graphs/Graph.cs
  40. +2 -2  src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
  41. +1 -1  src/TensorFlowNET.Core/Keras/Layers/Embedding.cs
  42. +6 -6  src/TensorFlowNET.Core/Keras/Layers/Layer.cs
  43. +25 -0  src/TensorFlowNET.Core/Keras/Optimizers/DeviceDType.cs
  44. +209 -0  src/TensorFlowNET.Core/Keras/Optimizers/OptimizerV2.cs
  45. +41 -2  src/TensorFlowNET.Core/Keras/Optimizers/SGD.cs
  46. +2 -2  src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs
  47. +3 -3  src/TensorFlowNET.Core/Keras/backend.cs
  48. +4 -4  src/TensorFlowNET.Core/Layers/Layer.cs
  49. +1 -1  src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowContext.cs
  50. +1 -1  src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs
  51. +2 -2  src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
  52. +2 -2  src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
  53. +20 -8  src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
  54. +2 -2  src/TensorFlowNET.Core/Operations/Operation.Input.cs
  55. +2 -2  src/TensorFlowNET.Core/Operations/Operation.Output.cs
  56. +1 -0  src/TensorFlowNET.Core/Operations/Operation.cs
  57. +26 -5  src/TensorFlowNET.Core/Operations/array_ops.cs
  58. +2 -2  src/TensorFlowNET.Core/Operations/embedding_ops.cs
  59. +123 -30  src/TensorFlowNET.Core/Operations/gen_array_ops.cs
  60. +375 -93  src/TensorFlowNET.Core/Operations/gen_math_ops.cs
  61. +26 -0  src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
  62. +21 -0  src/TensorFlowNET.Core/Operations/gen_random_ops.cs
  63. +86 -17  src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs
  64. +31 -1  src/TensorFlowNET.Core/Operations/math_ops.cs
  65. +2 -2  src/TensorFlowNET.Core/Operations/nn_impl.py.cs
  66. +1 -0  src/TensorFlowNET.Core/Operations/random_ops.cs
  67. +26 -20  src/TensorFlowNET.Core/Operations/resource_variable_ops.cs
  68. +1 -1  src/TensorFlowNET.Core/Protobuf/IProtoBuf.cs
  69. +2 -1  src/TensorFlowNET.Core/Sessions/BaseSession.cs
  70. +5 -0  src/TensorFlowNET.Core/Status/Status.cs
  71. +17 -0  src/TensorFlowNET.Core/System/GCItemCounter.cs
  72. +13 -0  src/TensorFlowNET.Core/System/GCItemType.cs
  73. +95 -0  src/TensorFlowNET.Core/System/GarbageCollector.cs
  74. +10 -11  src/TensorFlowNET.Core/TensorFlow.Binding.csproj
  75. +79 -0  src/TensorFlowNET.Core/Tensors/EagerTensorV2.cs
  76. +11 -0  src/TensorFlowNET.Core/Tensors/ITensor.cs
  77. +31 -0  src/TensorFlowNET.Core/Tensors/TF_BindingArray.cs
  78. +1 -1  src/TensorFlowNET.Core/Tensors/TF_Tensor.cs
  79. +9 -0  src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
  80. +1 -1  src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
  81. +23 -8  src/TensorFlowNET.Core/Tensors/Tensor.Value.cs
  82. +5 -4  src/TensorFlowNET.Core/Tensors/Tensor.cs
  83. +3 -0  src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
  84. +26 -3  src/TensorFlowNET.Core/Tensors/constant_op.cs
  85. +1 -0  src/TensorFlowNET.Core/Tensors/dtypes.cs
  86. +4 -0  src/TensorFlowNET.Core/Tensors/tensor_util.cs
  87. +1 -0  src/TensorFlowNET.Core/Tensors/tf.constant.cs
  88. +1 -1  src/TensorFlowNET.Core/Training/AdamOptimizer.cs
  89. +10 -10  src/TensorFlowNET.Core/Training/Optimizer.cs
  90. +1 -1  src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs
  91. +1 -1  src/TensorFlowNET.Core/Training/Saving/ISaverBuilder.cs
  92. +2 -2  src/TensorFlowNET.Core/Training/Saving/Saver.cs
  93. +5 -5  src/TensorFlowNET.Core/Training/Saving/saveable_object_util.py.cs
  94. +2 -2  src/TensorFlowNET.Core/Training/Saving/saver.py.cs
  95. +3 -3  src/TensorFlowNET.Core/Training/SlotCreator.cs
  96. +18 -7  src/TensorFlowNET.Core/Training/Trackable.cs
  97. +1 -1  src/TensorFlowNET.Core/Training/TrainingUtil.cs
  98. +33 -0  src/TensorFlowNET.Core/Training/gen_training_ops.cs
  99. +50 -0  src/TensorFlowNET.Core/Util/BindingArray.cs
  100. +50 -0  src/TensorFlowNET.Core/Util/BindingTensorArray.cs

README.md (+2 -2)

@@ -9,7 +9,7 @@
 [![Badge](https://img.shields.io/badge/link-996.icu-red.svg)](https://996.icu/#/en_US)
 [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)

-*master branch is based on tensorflow 2.1 now, v0.15-tensorflow1.15 is from tensorflow1.15.*
+*master branch is based on tensorflow 2.2 now, v0.15-tensorflow1.15 is from tensorflow1.15.*

 TF.NET is a member project of [SciSharp STACK](https://github.com/SciSharp).

@@ -28,7 +28,7 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr

 ### How to use

-| TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.0 |
+| TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.2 |
 | ----------- | ------- | ------- | ------- | ------ |
 | tf.net 0.20 | | | x | x |
 | tf.net 0.15 | | x | x | |


TensorFlow.NET.sln (+108 -38)

@@ -13,98 +13,168 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\Ten
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowNET.Console", "src\TensorFlowNET.Console\TensorFlowNET.Console.csproj", "{03F06299-3F4B-4449-A709-3A647657BC0C}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
+Debug|x86 = Debug|x86
Debug-Minimal|Any CPU = Debug-Minimal|Any CPU
Debug-Minimal|x64 = Debug-Minimal|x64
+Debug-Minimal|x86 = Debug-Minimal|x86
Publish|Any CPU = Publish|Any CPU
Publish|x64 = Publish|x64
+Publish|x86 = Publish|x86
Release|Any CPU = Release|Any CPU
Release|x64 = Release|x64
+Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|x64
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.ActiveCfg = Debug|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.ActiveCfg = Release|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.Build.0 = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU
-{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|x64
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.ActiveCfg = Release|Any CPU
+{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.Build.0 = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.ActiveCfg = Debug|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.ActiveCfg = Release|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.Build.0 = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU
-{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|x64
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.ActiveCfg = Release|Any CPU
+{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x86.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.ActiveCfg = Debug|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.ActiveCfg = Release|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU
-{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|x64
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.ActiveCfg = Release|Any CPU
+{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x86.ActiveCfg = Debug|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x86.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x86.ActiveCfg = Release|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x86.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU
-{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|x64
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x86.ActiveCfg = Release|Any CPU
+{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x86.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x86.ActiveCfg = Debug|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x86.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x86.ActiveCfg = Release|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x86.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU
-{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|x64
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x86.ActiveCfg = Release|Any CPU
+{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x86.Build.0 = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.ActiveCfg = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.Build.0 = Debug|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.Build.0 = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.ActiveCfg = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.Build.0 = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.ActiveCfg = Release|Any CPU
+{03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE


BIN  docs/assets/tf2.jpg (1200 × 600, 92 kB)

BIN  docs/assets/tf2.psd


docs/source/HelloWorld.md (+1 -1)

@@ -72,5 +72,5 @@ Hello, TensorFlow!
Press any key to continue . . .
```

-This sample code can be found at [here](https://github.com/SciSharp/TensorFlow.NET/blob/master/test/TensorFlowNET.Examples/HelloWorld.cs).
+This sample code can be found at [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs).



src/TensorFlowNET.Console/MemoryMonitor.cs (+51 -0)

@@ -0,0 +1,51 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class MemoryMonitor
{
public void WarmUp()
{
print(tf.VERSION);
}

public void Execute(int epoch, int iterate, Action<int> process)
{
/*GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();*/

print($"{process.Method.Name} started...");
for (int i = 0; i < epoch; i++)
{
var initialMemory = Process.GetCurrentProcess().PrivateMemorySize64;// GC.GetTotalMemory(true);
process(iterate);
var finalMemory = Process.GetCurrentProcess().PrivateMemorySize64; //GC.GetTotalMemory(true);
print($"Epoch {i}: {Format(finalMemory - initialMemory)}.");
}

GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();

print($"Total {process.Method.Name} usage {Format(Process.GetCurrentProcess().PrivateMemorySize64)}");
}

private string Format(long usage)
{
if (usage < 0)
return $"-{Format(0 - usage)}";

if (usage <= 1024 && usage >= 0)
return $"{usage} Bytes";
else if (usage > 1024 && usage <= 1024 * 1024)
return $"{usage / 1024} KB";
else
return $"{usage / 1024 / 1024} MB";
}
}
}

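For orientation, a minimal sketch of how this monitor is meant to be driven; the class name and the epoch/iteration counts below are illustrative only, and `Program.cs` further down wires up the real test cases:

```csharp
using System;
using static Tensorflow.Binding;

namespace Tensorflow
{
    class MemoryMonitorUsageSketch
    {
        static void Run()
        {
            var mm = new MemoryMonitor();
            mm.WarmUp();   // loads the native library and prints tf.VERSION

            // Measure private-memory growth while creating 100k constants, over 10 epochs.
            Action<int> constantCase = iterate =>
            {
                for (int i = 0; i < iterate; i++)
                {
                    var tensor = tf.constant(3112.0f);  // same body as MemoryTestingCases.Constant
                }
            };
            mm.Execute(epoch: 10, iterate: 100_000, process: constantCase);
        }
    }
}
```
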
src/TensorFlowNET.Console/MemoryTestingCases.cs (+55 -0)

@@ -0,0 +1,55 @@
using System;
using System.Collections.Generic;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow
{
class MemoryTestingCases
{
/// <summary>
///
/// </summary>
public Action<int> Constant
=> (iterate) =>
{
for (int i = 0; i < iterate; i++)
{
var tensor = tf.constant(3112.0f);
}
};
public Action<int> Variable
=> (iterate) =>
{
for (int i = 0; i < iterate; i++)
{
var tensor = tf.Variable(3112.0f);
}
};

public Action<int> MathAdd
=> (iterate) =>
{
var x = tf.constant(3112.0f);
var y = tf.constant(3112.0f);

for (int i = 0; i < iterate; i++)
{
var z = x + y;
}
};

public Action<int> Gradient
=> (iterate) =>
{
for(int i = 0; i< iterate; i++)
{
var w = tf.constant(3112.0f);
using var tape = tf.GradientTape();
tape.watch(w);
var loss = w * w;
var grad = tape.gradient(loss, w);
}
};
}
}

src/TensorFlowNET.Console/Program.cs (+33 -0)

@@ -0,0 +1,33 @@
using System;

namespace Tensorflow
{
class Program
{
static void Main(string[] args)
{
// boot .net core 10.5M.
var mm = new MemoryMonitor();
// warm up tensorflow.net 28.5M.
mm.WarmUp();
var cases = new MemoryTestingCases();

int batchSize = 1000;

// 1 million float tensor 58.5M.
mm.Execute(10, 100 * batchSize, cases.Constant);

// 100K float variable 80.5M.
mm.Execute(10, 10 * batchSize, cases.Variable);

// 1 million math add 36.5M.
mm.Execute(10, 100 * batchSize, cases.MathAdd);

// 100K gradient 210M.
mm.Execute(10, 10 * batchSize, cases.Gradient);

Console.WriteLine("Finished.");
Console.ReadLine();
}
}
}

src/TensorFlowNET.Console/TensorFlowNET.Console.csproj (+18 -0)

@@ -0,0 +1,18 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp3.1</TargetFramework>
<RootNamespace>Tensorflow</RootNamespace>
<AssemblyName>Tensorflow</AssemblyName>
</PropertyGroup>

<ItemGroup>
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.2.0.1" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\TensorFlowNET.Core\Tensorflow.Binding.csproj" />
</ItemGroup>

</Project>

src/TensorFlowNET.Core/APIs/c_api.cs (+2 -2)

@@ -43,7 +43,7 @@ namespace Tensorflow
    /// </summary>
    public partial class c_api
    {
-       public const string TensorFlowLibName = "tensorflow";
+       public const string TensorFlowLibName = @"D:\SciSharp\tensorflow-google\bazel-bin\tensorflow\tensorflow.dll";

        public static string StringPiece(IntPtr handle)
        {
@@ -51,7 +51,7 @@ namespace Tensorflow
        }

        public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args);
+       public delegate void DeallocatorV2(IntPtr data, long size, IntPtr args);
        public struct DeallocatorArgs
        {
            internal static unsafe c_api.DeallocatorArgs* EmptyPtr;


src/TensorFlowNET.Core/APIs/tf.gradients.cs (+2 -2)

@@ -20,8 +20,8 @@ namespace Tensorflow
{
    public partial class tensorflow
    {
-       public GradientActor GradientTape()
-           => new GradientActor();
+       public GradientTape GradientTape()
+           => new GradientTape();

        public Tensor[] gradients(Tensor[] ys,
            Tensor[] xs,


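`GradientActor` becomes `GradientTape` here; a minimal eager-mode sketch of the renamed API, mirroring the `Gradient` case in `MemoryTestingCases.cs` above (the class name is just for the sketch):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class GradientTapeSketch
    {
        static void Run()
        {
            var w = tf.constant(3.0f);
            using var tape = tf.GradientTape();  // returns the renamed GradientTape type
            tape.watch(w);
            var loss = w * w;
            var grad = tape.gradient(loss, w);   // d(w*w)/dw, expected value 6
        }
    }
}
```
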
src/TensorFlowNET.Core/APIs/tf.math.cs (+7 -2)

@@ -14,6 +14,7 @@
limitations under the License.
******************************************************************************/

+using Tensorflow.Eager;
using Tensorflow.Operations;

namespace Tensorflow
@@ -259,7 +260,6 @@ namespace Tensorflow
        public Tensor sub<Tx, Ty>(Tx a, Ty b, string name = null)
            => gen_math_ops.sub(a, b, name: name);

        public Tensor divide(Tensor a, Tensor b)
            => a / b;

@@ -348,6 +348,9 @@ namespace Tensorflow
        public Tensor minimum<T1, T2>(T1 x, T2 y, string name = null)
            => gen_math_ops.minimum(x, y, name: name);

+       public Tensor multiply(Tensor x, Tensor y, string name = null)
+           => gen_math_ops.mul(x, y, name: name);
+
        /// <summary>
        /// return x * y
        /// </summary>
@@ -387,7 +390,7 @@ namespace Tensorflow
            => x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y");

        public Tensor pow<T1, T2>(T1 x, T2 y, string name = "pow")
-           => gen_math_ops.pow(x, y, name: name);
+           => math_ops.pow(x, y, name: name);

        /// <summary>
        /// Divides `x / y` elementwise, rounding toward the most negative integer.
@@ -525,5 +528,7 @@ namespace Tensorflow

        public Tensor square(Tensor x, string name = null)
            => gen_math_ops.square(x, name: name);
+       public Tensor squared_difference(Tensor x, Tensor y, string name = null)
+           => gen_math_ops.squared_difference(x: x, y: y, name: name);
    }
}

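A quick sketch of the math surface touched here (constant values are illustrative; the class name is just for the sketch):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class MathApiSketch
    {
        static void Run()
        {
            var x = tf.constant(3.0f);
            var y = tf.constant(2.0f);

            var prod = tf.multiply(x, y);             // new overload, routed to gen_math_ops.mul -> 6
            var sqdiff = tf.squared_difference(x, y); // (x - y)^2 -> 1
            var cube = tf.pow(x, 3);                  // now goes through math_ops.pow -> 27
        }
    }
}
```
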
src/TensorFlowNET.Core/APIs/tf.nn.cs (+4 -2)

@@ -116,6 +116,8 @@ namespace Tensorflow
        public IActivation relu() => new relu();
        public IActivation swish() => new swish();
        public IActivation tanh() => new tanh();
+
+       public IActivation softmax() => new softmax();
        public Tensor tanh(Tensor x, string name = null)
            => gen_nn_ops.tanh(x, name);

@@ -123,8 +125,8 @@ namespace Tensorflow
            => gen_nn_ops.relu(features, name);

        public Tensor[] fused_batch_norm(Tensor x,
-           VariableV1 scale,
-           VariableV1 offset,
+           IVariableV1 scale,
+           IVariableV1 offset,
            Tensor mean = null,
            Tensor variance = null,
            float epsilon = 0.001f,


src/TensorFlowNET.Core/APIs/tf.random.cs (+27 -16)

@@ -18,22 +18,33 @@ namespace Tensorflow
{
    public partial class tensorflow
    {
-       /// <summary>
-       /// Outputs random values from a normal distribution.
-       /// </summary>
-       /// <param name="shape"></param>
-       /// <param name="mean"></param>
-       /// <param name="stddev"></param>
-       /// <param name="dtype"></param>
-       /// <param name="seed"></param>
-       /// <param name="name"></param>
-       /// <returns></returns>
-       public Tensor random_normal(TensorShape shape,
-           float mean = 0.0f,
-           float stddev = 1.0f,
-           TF_DataType dtype = TF_DataType.TF_FLOAT,
-           int? seed = null,
-           string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name);
+       public Random random => new Random();
+
+       public class Random
+       {
+           /// <summary>
+           /// Outputs random values from a normal distribution.
+           /// </summary>
+           /// <param name="shape"></param>
+           /// <param name="mean"></param>
+           /// <param name="stddev"></param>
+           /// <param name="dtype"></param>
+           /// <param name="seed"></param>
+           /// <param name="name"></param>
+           /// <returns></returns>
+           public Tensor normal(TensorShape shape,
+               float mean = 0.0f,
+               float stddev = 1.0f,
+               TF_DataType dtype = TF_DataType.TF_FLOAT,
+               int? seed = null,
+               string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name);
+
+           public Tensor categorical(
+               Tensor logits,
+               int num_samples,
+               int? seed = null,
+               string name = null,
+               TF_DataType output_dtype = TF_DataType.DtInvalid) => random_ops.multinomial(logits, num_samples, seed: seed, name: name, output_dtype: output_dtype);
+       }

        public Tensor random_uniform(TensorShape shape,
            float minval = 0,


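Call sites move from the old top-level `tf.random_normal(...)` to the nested accessor added above. A short sketch (shapes, logits and the class name are illustrative):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class RandomApiSketch
    {
        static void Run()
        {
            // 2x3 samples from N(0, 1) via the new tf.random accessor.
            var normal = tf.random.normal(new TensorShape(2, 3));

            // One draw per row from a categorical distribution over 4 classes.
            var logits = tf.constant(new float[,] { { 1f, 1f, 1f, 1f } });
            var samples = tf.random.categorical(logits, num_samples: 1);
        }
    }
}
```
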
src/TensorFlowNET.Core/APIs/tf.train.cs (+4 -4)

@@ -38,8 +38,8 @@ namespace Tensorflow
        public Optimizer GradientDescentOptimizer(Tensor learning_rate)
            => new GradientDescentOptimizer(learning_rate);

-       public Optimizer AdamOptimizer(float learning_rate, string name = "Adam")
-           => new AdamOptimizer(learning_rate, name: name);
+       public Optimizer AdamOptimizer(float learning_rate, float epsilon = 1e-8f, string name = "Adam")
+           => new AdamOptimizer(learning_rate, epsilon:epsilon, name: name);

        public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam")
            => new AdamOptimizer(learning_rate, name: name, dtype: dtype);
@@ -50,7 +50,7 @@ namespace Tensorflow
        public ExponentialMovingAverage ExponentialMovingAverage(float decay)
            => new ExponentialMovingAverage(decay);

-       public Saver Saver(VariableV1[] var_list = null, int max_to_keep = 5)
+       public Saver Saver(IVariableV1[] var_list = null, int max_to_keep = 5)
            => new Saver(var_list: var_list, max_to_keep: max_to_keep);

        public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
@@ -68,7 +68,7 @@ namespace Tensorflow
            clear_devices,
            import_scope).Item1;

-       public (MetaGraphDef, Dictionary<string, VariableV1>) export_meta_graph(string filename = "",
+       public (MetaGraphDef, Dictionary<string, IVariableV1>) export_meta_graph(string filename = "",
            bool as_text = false,
            bool clear_devices = false,
            bool clear_extraneous_savers = false,


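The Adam factory gains an explicit `epsilon`, and the saver APIs now speak `IVariableV1`. A usage sketch with illustrative hyperparameters (the class name is just for the sketch):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class TrainApiSketch
    {
        static void Run()
        {
            // epsilon now flows through to the AdamOptimizer constructor.
            var optimizer = tf.train.AdamOptimizer(learning_rate: 0.01f, epsilon: 1e-7f);

            // Saver accepts the interface type returned by tf.trainable_variables().
            var saver = tf.train.Saver(var_list: tf.trainable_variables());
        }
    }
}
```
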
src/TensorFlowNET.Core/APIs/tf.variable.cs (+5 -5)

@@ -21,9 +21,9 @@ namespace Tensorflow
{
    public partial class tensorflow
    {
-       public VariableV1[] global_variables(string scope = null)
+       public IVariableV1[] global_variables(string scope = null)
        {
-           return (ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) as List<VariableV1>)
+           return (ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) as List<IVariableV1>)
                .ToArray();
        }

@@ -33,7 +33,7 @@ namespace Tensorflow
        /// <param name="var_list">List of `Variable` objects to initialize.</param>
        /// <param name="name">Optional name for the returned operation.</param>
        /// <returns>An Op that run the initializers of all the specified variables.</returns>
-       public Operation variables_initializer(VariableV1[] var_list, string name = "init")
+       public Operation variables_initializer(IVariableV1[] var_list, string name = "init")
            => variables.variables_initializer(var_list, name: name);

        public Operation global_variables_initializer()
@@ -47,8 +47,8 @@ namespace Tensorflow
        /// </summary>
        /// <param name="scope"></param>
        /// <returns></returns>
-       public VariableV1[] trainable_variables(string scope = null)
-           => (variables.trainable_variables() as List<VariableV1>).ToArray();
+       public IVariableV1[] trainable_variables(string scope = null)
+           => (variables.trainable_variables() as List<IVariableV1>).ToArray();

        public RefVariable get_variable(string name,
            TensorShape shape = null,


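These members now return the `IVariableV1` interface rather than the concrete `VariableV1`; graph-mode call sites keep the same shape. A small sketch (the class name and session usage are illustrative):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class VariableApiSketch
    {
        static void Run()
        {
            var v = tf.Variable(3112.0f);

            IVariableV1[] globals = tf.global_variables();        // interface-typed now
            Operation init = tf.variables_initializer(globals);   // same call shape as before

            using (var sess = tf.Session())
                sess.run(init);
        }
    }
}
```
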
src/TensorFlowNET.Core/Binding.Util.cs (+11 -0)

@@ -195,6 +195,17 @@ namespace Tensorflow
            return (float)(DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalSeconds;
        }

+       public static IEnumerable<(T1, T2)> zip<T1, T2>((T1, T1) t1, (T2, T2) t2)
+       {
+           for (int i = 0; i < 2; i++)
+           {
+               if (i == 0)
+                   yield return (t1.Item1, t2.Item1);
+               else
+                   yield return (t1.Item2, t2.Item2);
+           }
+       }
+
        public static IEnumerable<(T, T)> zip<T>(NDArray t1, NDArray t2)
            where T : unmanaged
        {


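The new overload pairs two 2-tuples element-wise. For example (class name is just for the sketch):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class ZipSketch
    {
        static void Run()
        {
            // Yields (1, "a") then (2, "b").
            foreach (var (number, letter) in zip((1, 2), ("a", "b")))
                print($"{number} -> {letter}");
        }
    }
}
```
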
src/TensorFlowNET.Core/Eager/EagerOperation.cs (+52 -0)

@@ -0,0 +1,52 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Eager
{
public class EagerOperation : Operation
{
public int NumInputs;
public IntPtr[] InputHandles { get; set; }
public Tensor[] Inputs { get; set; }
public int NumOutputs;
public IntPtr[] OutputHandles { get; set; }
public Tensor[] Outputs { get; set; }
public int[] SkipInputIndices { get; set; }

public EagerOperation() : base(IntPtr.Zero) { }

public override InputList inputs
{
get
{
if (_inputs_val == null)
{
var retval = new Tensor[NumInputs];

for (int i = 0; i < NumInputs; i++)
{

}

_inputs_val = new InputList(Inputs);
}

return _inputs_val;
}
}

public override Tensor[] outputs
{
get
{
if (_outputs == null)
{
_outputs = Outputs;
}

return _outputs;
}
}
}
}

src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs (+73 -0)

@@ -0,0 +1,73 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow.Eager
{
public partial class EagerTensor : Tensor
{
public EagerTensor() : base(IntPtr.Zero)
{
EagerTensorHandle = c_api.TFE_NewEagerTensor();
}

public EagerTensor(IntPtr handle) : base(IntPtr.Zero)
{
EagerTensorHandle = handle;
Resolve();
}

public EagerTensor(string value, string device_name) : base(value)
{
EagerTensorHandle = c_api.TFE_NewEagerTensor();
tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle);
Resolve();
}
public EagerTensor(NDArray value, string device_name) : base(value)
{
EagerTensorHandle = c_api.TFE_NewEagerTensor();
tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle);
Resolve();
}

public EagerTensor Resolve()
{
_id = c_api.TFE_EagerTensorId(EagerTensorHandle);

if (tfe_tensor_handle == IntPtr.Zero)
tfe_tensor_handle = c_api.TFE_EagerTensorHandle(EagerTensorHandle);

if (_handle == IntPtr.Zero)
_handle = c_api.TFE_TensorHandleResolve(tfe_tensor_handle, status.Handle);

/*print($"new Tensor {Id} {_handle.ToString("x16")}");
print($"new TensorHandle {Id} {tfe_tensor_handle.ToString("x16")}");
print($"new EagerTensor {Id} {EagerTensorHandle.ToString("x16")}");*/

GarbageCollector.Increase(_handle, GCItemType.TensorHandle);
GarbageCollector.Increase(tfe_tensor_handle, GCItemType.LocalTensorHandle);
GarbageCollector.Increase(EagerTensorHandle, GCItemType.EagerTensorHandle);

return this;
}

protected override void DisposeUnmanagedResources(IntPtr handle)
{
GarbageCollector.Decrease(_handle);
GarbageCollector.Decrease(tfe_tensor_handle);
GarbageCollector.Decrease(EagerTensorHandle);

/*print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}");
c_api.TF_DeleteTensor(_handle);
print($"deleting DeleteTensorHandle {Id} {tfe_tensor_handle.ToString("x16")}");
c_api.TFE_DeleteTensorHandle(tfe_tensor_handle);
print($"deleting DeleteEagerTensor {Id} {EagerTensorHandle.ToString("x16")}");
c_api.TFE_DeleteEagerTensor(EagerTensorHandle);*/
}
}
}

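The `Resolve`/`DisposeUnmanagedResources` pair above leans on the new `GarbageCollector` (added in `src/TensorFlowNET.Core/System/GarbageCollector.cs`, not shown in this excerpt). A hypothetical minimal sketch of the counting contract those `Increase`/`Decrease` calls assume; the shipped class also decides when to invoke the native delete functions:

```csharp
using System;
using System.Collections.Concurrent;

namespace Tensorflow
{
    // Sketch only: tracks native handles (tensor, local tensor handle, eager tensor handle)
    // by reference count; the real GarbageCollector frees handles once their count hits zero.
    static class GarbageCollectorSketch
    {
        static readonly ConcurrentDictionary<IntPtr, (GCItemType type, int refs)> items
            = new ConcurrentDictionary<IntPtr, (GCItemType, int)>();

        public static void Increase(IntPtr handle, GCItemType type)
            => items.AddOrUpdate(handle, (type, 1), (_, v) => (v.type, v.refs + 1));

        public static void Decrease(IntPtr handle)
        {
            if (items.TryGetValue(handle, out var v))
                items[handle] = (v.type, v.refs - 1);
            // A sweep pass (not shown) would release zero-count handles via
            // c_api.TF_DeleteTensor / TFE_DeleteTensorHandle / TFE_DeleteEagerTensor.
        }
    }
}
```
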
src/TensorFlowNET.Core/Eager/EagerTensor.Implicit.cs (+2 -2)

@@ -8,7 +8,7 @@ namespace Tensorflow.Eager
{
    public partial class EagerTensor
    {
-       public static explicit operator TFE_TensorHandle(EagerTensor tensor)
-           => tensor.tfe_tensor_handle;
+       public static implicit operator IntPtr(EagerTensor tensor)
+           => tensor.EagerTensorHandle;
    }
}

src/TensorFlowNET.Core/Eager/EagerTensor.cs (+25 -29)

@@ -2,42 +2,34 @@
using System;
using System.Collections.Generic;
using System.Text;
-using static Tensorflow.Binding;

namespace Tensorflow.Eager
{
    public partial class EagerTensor : Tensor
    {
        Status status = new Status();
-       TFE_TensorHandle tfe_tensor_handle;
-       public EagerTensor(IntPtr handle) : base(handle)
-       {
-           tfe_tensor_handle = handle;
-           _handle = c_api.TFE_TensorHandleResolve(handle, status.Handle);
-       }
+       IntPtr tfe_tensor_handle;
+       public IntPtr EagerTensorHandle { get; set; }
+       public override string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(tfe_tensor_handle, status.Handle));

-       public EagerTensor(string value, string device_name) : base(value)
-       {
-           tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
-       }
+       // public override int rank => c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status);

-       public EagerTensor(int value, string device_name) : base(value)
-       {
-           tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
-       }
-
-       public EagerTensor(float[] value, string device_name) : base(value)
-       {
-           tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
-       }
-
-       public EagerTensor(double[] value, string device_name) : base(value)
-       {
-           tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
-       }
-
-       public EagerTensor(NDArray value, string device_name) : base(value)
-       {
-           tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
-       }
+       public static int GetRank(IntPtr handle)
+       {
+           var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);
+           using var status = new Status();
+           return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status.Handle);
+       }
+
+       public static int[] GetDims(IntPtr handle)
+       {
+           var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);
+           using var status = new Status();
+           var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status.Handle)];
+           for (int i = 0; i < dims.Length; i++)
+               dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, status.Handle);
+           return dims;
+       }

        public override string ToString()
@@ -45,23 +37,27 @@ namespace Tensorflow.Eager
            switch (rank)
            {
                case -1:
-                   return $"tf.Tensor: shape=<unknown>, dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}";
+                   return $"tf.Tensor: shape=<unknown>, dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}";
                case 0:
-                   return $"tf.Tensor: shape=(), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}";
+                   return $"tf.Tensor: shape=(), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}";
                default:
-                   return $"tf.Tensor: shape=({string.Join(",", shape)}), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString()}";
+                   return $"tf.Tensor: shape=({string.Join(",", shape)}), dtype={dtype.as_numpy_name()}, numpy={GetFormattedString(dtype, numpy())}";
            }
        }

-       private string GetFormattedString()
+       public static string GetFormattedString(TF_DataType dtype, NDArray nd)
        {
-           var nd = numpy();
+           if (nd.size == 0)
+               return "[]";
+
            switch (dtype)
            {
                case TF_DataType.TF_STRING:
                    return $"b'{(string)nd}'";
                case TF_DataType.TF_BOOL:
                    return (nd.GetByte(0) > 0).ToString();
+               case TF_DataType.TF_RESOURCE:
+                   return "<unprintable>";
                default:
                    return nd.ToString();
            }


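For reference, the reworked `ToString`/`GetFormattedString` pair renders eager tensors roughly as shown below; this is a sketch, and the exact NumSharp formatting of the value part may differ:

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    class EagerTensorToStringSketch
    {
        static void Run()
        {
            var scalar = tf.constant(3112.0f);
            print(scalar.ToString());
            // e.g. "tf.Tensor: shape=(), dtype=float32, numpy=3112"

            var vector = tf.constant(new[] { 1, 2, 3 });
            print(vector.ToString());
            // e.g. "tf.Tensor: shape=(3), dtype=int32, numpy=[1, 2, 3]"
        }
    }
}
```
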
src/TensorFlowNET.Core/Eager/Execute.cs (+17 -14)

@@ -27,18 +27,26 @@ namespace Tensorflow.Eager
        /// <param name="ctx">The value of context.context().</param>
        /// <param name="name">Customized name for the operation.</param>
        /// <returns>List of output Tensor objects. The list is empty if there are no outputs</returns>
-       public Tensor execute(Context ctx, string op_name, Tensor[] inputs, object[] attrs, string name = null)
+       public EagerTensor[] execute(Context ctx, string op_name, int num_outputs,
+           EagerTensor[] inputs, object[] attrs,
+           string name = null)
        {
            ctx.ensure_initialized();
-           using (var status = new Status())
-           {
-               var retVals = wrap_tfe_src.TFE_Execute(ctx, ctx.device_name, op_name, inputs, attrs, 1, status);
-
-               return new EagerTensor(retVals[0]);
-           }
+           var results = Enumerable.Range(0, num_outputs).Select(x => new EagerTensor()).ToArray();
+           using Status status = new Status(c_api.TFE_QuickExecute(ctx,
+               ctx.device_name,
+               op_name,
+               inputs.Select(x => x.EagerTensorHandle).ToArray(),
+               inputs.Length,
+               op => wrap_tfe_src.SetOpAttrs(op, attrs),
+               results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
+           status.Check(true);
+
+           return results.Select(x => x.Resolve()).ToArray();
        }

-       public (TF_DataType, Tensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null)
+       public (TF_DataType, EagerTensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null)
        {
            if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid)
                return (default_dtype, null);
@@ -55,10 +63,10 @@ namespace Tensorflow.Eager

            if (dtype == TF_DataType.DtInvalid)
            {
-               var ret = new List<Tensor>();
+               var ret = new List<EagerTensor>();
                foreach (var t in args)
                {
-                   ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx));
+                   ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as EagerTensor);
                    if (dtype == TF_DataType.DtInvalid)
                        dtype = ret.Last().dtype;
                }
@@ -68,10 +76,5 @@ namespace Tensorflow.Eager
            else
                throw new NotImplementedException("");
        }
-
-       public void record_gradient(string op_name, InputList inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null)
-       {
-           wrap_tfe_src.RecordGradient(op_name, inputs._inputs, attrs, results, name);
-       }
    }
}

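A hedged sketch of how the reworked `execute` is meant to be driven by the generated eager ops. The op name, the attribute name/value pair, the parameterless `Execute` construction and the `Context` location are assumptions for illustration; the attr layout is whatever `wrap_tfe_src.SetOpAttrs` expects:

```csharp
using Tensorflow.Eager;

namespace Tensorflow
{
    class ExecuteSketch
    {
        static EagerTensor Multiply(Context ctx, EagerTensor x, EagerTensor y)
        {
            // One op, one output: results come back already Resolve()d.
            var results = new Execute().execute(ctx, "Mul", num_outputs: 1,
                inputs: new[] { x, y },
                attrs: new object[] { "T", x.dtype });  // illustrative attr pair
            return results[0];
        }
    }
}
```
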
src/TensorFlowNET.Core/Eager/TFE_TensorHandle.cs (+2 -6)

@@ -1,19 +1,15 @@
using System;
using System.Collections.Generic;
-using System.Runtime.InteropServices;
using System.Text;

namespace Tensorflow.Eager
{
-   [StructLayout(LayoutKind.Sequential)]
    public struct TFE_TensorHandle
    {
        IntPtr _handle;

-       public TFE_TensorHandle(IntPtr handle)
-           => _handle = handle;
-
-       public static implicit operator TFE_TensorHandle(IntPtr handle)
-           => new TFE_TensorHandle(handle);
-
        public static implicit operator IntPtr(TFE_TensorHandle tensor)
            => tensor._handle;




+ 139
- 5
src/TensorFlowNET.Core/Eager/c_api.eager.cs View File

@@ -7,6 +7,46 @@ namespace Tensorflow
{ {
public partial class c_api public partial class c_api
{ {
[DllImport(TensorFlowLibName)]
public static extern void TFE_RegisterGradientFunction(gradient_function_callback gradientFunctionCallback,
delete_backward_function_callback deleteBackwardFunctionCallback);

/// <summary>
///
/// </summary>
/// <param name="op_name"></param>
/// <param name="op_inputs"></param>
/// <param name="op_outputs"></param>
/// <param name="num_attrs"></param>
/// <param name="output_grads">previous node ouput</param>
/// <param name="skip_input_indices"></param>
/// <returns></returns>
[UnmanagedFunctionPointer(CallingConvention.StdCall)]
public delegate IntPtr gradient_function_callback(string op_name,
IntPtr op_inputs,
IntPtr op_outputs,
int num_attrs,
IntPtr output_grads,
IntPtr skip_input_indices);

[UnmanagedFunctionPointer(CallingConvention.StdCall)]
public delegate void delete_backward_function_callback(string op_name,
IntPtr op_inputs,
IntPtr op_outputs);

[DllImport(TensorFlowLibName)]
public static extern IntPtr TFE_WrapGradientResult(IntPtr[] gradients, int num_gradients);

[DllImport(TensorFlowLibName)]
public static extern IntPtr VSpace_Handle(VSpace_callback_Ones ones, VSpace_callback_AggregateGrads aggregate_grads);
[UnmanagedFunctionPointer(CallingConvention.StdCall)]
public delegate IntPtr VSpace_callback_Ones(long[] shape, int dims, TF_DataType dtype);
[UnmanagedFunctionPointer(CallingConvention.StdCall)]
public delegate IntPtr VSpace_callback_AggregateGrads(TF_BindingArray gradients);

[DllImport(TensorFlowLibName)]
public static extern void TFE_RegisterVSpace(IntPtr vspace);
/// <summary> /// <summary>
/// Return a new options object. /// Return a new options object.
/// </summary> /// </summary>
@@ -102,14 +142,20 @@ namespace Tensorflow
public static extern TFE_Op TFE_NewOp(IntPtr ctx, string op_or_function_name, SafeStatusHandle status); public static extern TFE_Op TFE_NewOp(IntPtr ctx, string op_or_function_name, SafeStatusHandle status);


/// <summary> /// <summary>
///
/// Resets `op_to_reset` with `op_or_function_name` and `raw_device_name`. This
/// is for performance optimization by reusing an exiting unused op rather than
/// creating a new op every time. If `raw_device_name` is `NULL` or empty, it
/// does not set the device name. If it's not `NULL`, then it attempts to parse
/// and set the device name. It's effectively `TFE_OpSetDevice`, but it is faster
/// than separately calling it because if the existing op has the same
/// `raw_device_name`, it skips parsing and just leave as it is.
/// </summary> /// </summary>
/// <param name="ctx">TFE_Context*</param>
/// <param name="op_to_reset">TFE_Op*</param>
/// <param name="op_or_function_name">const char*</param> /// <param name="op_or_function_name">const char*</param>
/// <param name="raw_device_name">const char*</param>
/// <param name="status">TF_Status*</param> /// <param name="status">TF_Status*</param>
/// <param name="op_to_reset">TFE_Op*</param>
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern void TFE_OpReset(IntPtr ctx, string op_or_function_name, SafeStatusHandle status, IntPtr op_to_reset);
public static extern void TFE_OpReset(IntPtr op_to_reset, string op_or_function_name, string raw_device_name, SafeStatusHandle status);


/// <summary> /// <summary>
/// ///
@@ -180,6 +226,18 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern TFE_TensorHandle TFE_NewTensorHandle(IntPtr t, SafeStatusHandle status); public static extern TFE_TensorHandle TFE_NewTensorHandle(IntPtr t, SafeStatusHandle status);


[DllImport(TensorFlowLibName)]
public static extern IntPtr TFE_EagerTensorHandle(IntPtr t);

[DllImport(TensorFlowLibName)]
public static extern int TFE_EagerTensorId(IntPtr t);

[DllImport(TensorFlowLibName)]
public static extern IntPtr TFE_NewEagerTensor();

[DllImport(TensorFlowLibName)]
public static extern void TFE_SetEagerTensorHandle(IntPtr tensor, IntPtr handle);

/// <summary> /// <summary>
/// Sets the default execution mode (sync/async). Note that this can be /// Sets the default execution mode (sync/async). Note that this can be
/// overridden per thread using TFE_ContextSetExecutorForThread. /// overridden per thread using TFE_ContextSetExecutorForThread.
@@ -206,7 +264,8 @@ namespace Tensorflow
/// <param name="status">TF_Status*</param> /// <param name="status">TF_Status*</param>
/// <returns></returns> /// <returns></returns>
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern TF_Tensor TFE_TensorHandleResolve(IntPtr h, SafeStatusHandle status);
public static extern IntPtr TFE_TensorHandleResolve(IntPtr h, SafeStatusHandle status);



/// <summary> /// <summary>
/// This function will block till the operation that produces `h` has completed. /// This function will block till the operation that produces `h` has completed.
@@ -217,6 +276,9 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern int TFE_TensorHandleNumDims(IntPtr h, SafeStatusHandle status); public static extern int TFE_TensorHandleNumDims(IntPtr h, SafeStatusHandle status);


[DllImport(TensorFlowLibName)]
public static extern int TFE_TensorHandleDim(IntPtr h, int dim, SafeStatusHandle status);

/// <summary> /// <summary>
/// Returns the device of the operation that produced `h`. If `h` was produced by /// Returns the device of the operation that produced `h`. If `h` was produced by
/// a copy, returns the destination device of the copy. Note that the returned /// a copy, returns the destination device of the copy. Note that the returned
@@ -255,6 +317,19 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern void TFE_DeleteTensorHandle(IntPtr h); public static extern void TFE_DeleteTensorHandle(IntPtr h);


/// <summary>
/// Deletes the eager tensor object referenced by `h`.
/// </summary>
/// <param name="h">TFE_TensorHandle*</param>
[DllImport(TensorFlowLibName)]
public static extern void TFE_DeleteEagerTensor(IntPtr h);

[DllImport(TensorFlowLibName)]
public static extern void TF_DeleteBindingArray(IntPtr h);

[DllImport(TensorFlowLibName)]
public static extern void TFE_DeleteBindingTensorArray(IntPtr h);

/// <summary> /// <summary>
/// Creates a new eager Executor. Nodes in one executor are guaranteed to be /// Creates a new eager Executor. Nodes in one executor are guaranteed to be
/// executed in sequence. Assigning nodes to different executors allows executing /// executed in sequence. Assigning nodes to different executors allows executing
@@ -304,5 +379,64 @@ namespace Tensorflow
/// <returns>TFE_Executor*</returns> /// <returns>TFE_Executor*</returns>
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern TFE_Executor TFE_ContextGetExecutorForThread(IntPtr ctx); public static extern TFE_Executor TFE_ContextGetExecutorForThread(IntPtr ctx);

/// <summary>
/// Executes an op on the eager fast path and writes the resulting handles into `outputs`.
/// </summary>
/// <param name="ctx">TFE_Context*</param>
/// <param name="device_name"></param>
/// <param name="op_name"></param>
/// <param name="name"></param>
/// <param name="inputs">EagerTensorHandle[]</param>
/// <param name="input_size"></param>
/// <param name="set_op_attrs"></param>
/// <param name="outputs">EagerTensorHandle[]</param>
/// <param name="output_size"></param>
/// <returns>TF_Status* wrapped in a SafeStatusHandle</returns>
[DllImport(TensorFlowLibName)]
public static extern SafeStatusHandle TFE_FastPathExecute(IntPtr ctx,
string device_name,
string op_name,
string name,
IntPtr[] inputs,
int input_size,
TFE_FastPathExecute_SetOpAttrs set_op_attrs,
IntPtr[] outputs,
int output_size);
[UnmanagedFunctionPointer(CallingConvention.StdCall)]
public delegate void TFE_FastPathExecute_SetOpAttrs(IntPtr op);

[DllImport(TensorFlowLibName)]
public static extern SafeStatusHandle TFE_QuickExecute(IntPtr ctx,
string device_name,
string op_name,
IntPtr[] inputs,
int input_size,
TFE_FastPathExecute_SetOpAttrs set_op_attrs,
IntPtr[] outputs,
int output_size);

[DllImport(TensorFlowLibName)]
public static extern IntPtr TFE_TapeSetNew(bool persistent, bool watch_accessed_variables);

[DllImport(TensorFlowLibName)]
public static extern void TFE_TapeSetRemove(IntPtr tape);

[DllImport(TensorFlowLibName)]
public static extern void TFE_TapeWatch(IntPtr tape, IntPtr variable);

[DllImport(TensorFlowLibName)]
public static extern void TFE_TapeVariableAccessed(IntPtr variable);

[DllImport(TensorFlowLibName)]
public static extern IntPtr TFE_TapeWatchedVariables(IntPtr tape);

[DllImport(TensorFlowLibName)]
public static extern IntPtr ResourceVariable_Handle(IntPtr variable);

[DllImport(TensorFlowLibName)]
public static extern SafeStatusHandle TFE_TapeGradient(IntPtr tape,
IntPtr[] target, int target_size,
IntPtr[] sources, int source_size,
IntPtr[] outputs, int output_size);
} }
} }
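For orientation, here is a minimal sketch (not part of this commit) of how the gradient_function_callback and delete_backward_function_callback declared above might be registered from managed code. The class name, method bodies, and the empty gradient array are placeholders; the static fields exist only to keep the delegates alive while native code still holds their function pointers.

using System;

namespace Tensorflow
{
    static class EagerGradientBootstrap
    {
        // Stored in static fields so the GC cannot collect the delegates
        // that native code keeps calling back into.
        static readonly c_api.gradient_function_callback grad_callback = OnGradient;
        static readonly c_api.delete_backward_function_callback delete_callback = OnDeleteBackwardFunction;

        public static void Register()
            => c_api.TFE_RegisterGradientFunction(grad_callback, delete_callback);

        static IntPtr OnGradient(string op_name, IntPtr op_inputs, IntPtr op_outputs,
            int num_attrs, IntPtr output_grads, IntPtr skip_input_indices)
        {
            // Placeholder: a real implementation resolves the eager gradient
            // function for op_name and returns its results to the native side.
            var grads = new IntPtr[0];
            return c_api.TFE_WrapGradientResult(grads, grads.Length);
        }

        static void OnDeleteBackwardFunction(string op_name, IntPtr op_inputs, IntPtr op_outputs)
        {
            // Placeholder: release any managed state captured for the backward pass.
        }
    }
}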

+ 0
- 33
src/TensorFlowNET.Core/Eager/wrap_tfe_src.RecordGradient.cs View File

@@ -1,33 +0,0 @@
using System.Collections.Generic;
using System.Linq;
using System;
using Tensorflow.Gradients;

namespace Tensorflow.Eager
{
/// <summary>
/// python\eager\pywrap_tfe_src.cc
/// </summary>
public partial class wrap_tfe_src
{
public static void RecordGradient(string op_name, Tensor[] inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null)
{
var input_ids = inputs.Select(x => x.Id).ToArray();
var input_dtypes = inputs.Select(x => x.dtype).ToArray();

bool should_record = false;
foreach (var input_dtype in input_dtypes)
{
if (Tape.IsDtypeTrainable(input_dtype.as_datatype_enum()))
{
should_record = true;
break;
}
}
if (!should_record) return;

var op_outputs = results;
var op_inputs = inputs;
}
}
}

+ 0
- 62
src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_Execute.cs View File

@@ -1,62 +0,0 @@
using System.Collections.Generic;
using System.Linq;
using System;
using static Tensorflow.OpDef.Types;

namespace Tensorflow.Eager
{
/// <summary>
/// python\eager\pywrap_tfe_src.cc
/// </summary>
public partial class wrap_tfe_src
{
public static IntPtr[] TFE_Execute(Context ctx,
string device_name,
string op_name,
Tensor[] inputs,
object[] attrs,
int num_outputs,
Status status)
=> TFE_ExecuteCancelable(ctx, device_name, op_name, inputs, attrs, num_outputs, status);

public static IntPtr[] TFE_ExecuteCancelable(Context ctx,
string device_name,
string op_name,
Tensor[] inputs,
object[] attrs,
int num_outputs,
Status status)
{
var op = GetOp(ctx, op_name, status);
status.Check(true);
c_api.TFE_OpSetDevice(op, device_name, status.Handle);
if(status.ok())
{
for (int i = 0; i < inputs.Length; ++i)
{
TFE_TensorHandle tensor_handle;
switch (inputs[i])
{
case EagerTensor et:
tensor_handle = (TFE_TensorHandle)et;
break;
default:
tensor_handle = c_api.TFE_NewTensorHandle(inputs[i], status.Handle);
break;
}
c_api.TFE_OpAddInput(op, tensor_handle, status.Handle);
}
}
if (status.ok())
SetOpAttrs(ctx, op, attrs, 0, status);

var outputs = new IntPtr[num_outputs];
if (status.ok())
{
c_api.TFE_Execute(op, outputs, ref num_outputs, status.Handle);
status.Check(true);
}
return outputs;
}
}
}

+ 11
- 170
src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_FastPathExecute.cs View File

@@ -2,6 +2,7 @@
using System.Linq; using System.Linq;
using System; using System;
using static Tensorflow.OpDef.Types; using static Tensorflow.OpDef.Types;
using static Tensorflow.Binding;


namespace Tensorflow.Eager namespace Tensorflow.Eager
{ {
@@ -10,183 +11,23 @@ namespace Tensorflow.Eager
/// </summary> /// </summary>
public partial class wrap_tfe_src public partial class wrap_tfe_src
{ {
static int kFastPathExecuteInputStartIndex = 0;
public static EagerTensor TFE_FastPathExecute(Context ctx,
string device_name,
string opName,
string name,
Action callbacks,
params object[] args)
{
int args_size = args.Length;
var attr_list_sizes = new Dictionary<string, long>();
using (var status = new Status())
{
var op = GetOp(ctx, opName, status);

var op_def = Graph.TFE_GetOpDef(opName);

// Set non-inferred attrs, including setting defaults if the attr is passed in
// as None.
for (int i = kFastPathExecuteInputStartIndex + op_def.InputArg.Count; i < args_size; i += 2)
{
var attr_name = args[i].ToString();
var attr_value = args[i + 1];

foreach(var attr in op_def.Attr)
{
if(attr_name == attr.Name)
{
SetOpAttrWithDefaults(ctx, op, attr, attr_name, attr_value, attr_list_sizes, status);
status.Check(true);
break;
}
}
}

c_api.TFE_OpSetDevice(op, device_name, status.Handle);
status.Check(true);

// Add inferred attrs and inputs.
for (int i = 0; i < op_def.InputArg.Count; i++)
{
var input_arg = op_def.InputArg[i];
if (!string.IsNullOrEmpty(input_arg.NumberAttr))
{
int len = (args[kFastPathExecuteInputStartIndex + i] as object[]).Length;
c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len);
attr_list_sizes[input_arg.NumberAttr] = len;

if (len > 0)
{
var fast_input_array = (object[])args[i];
// First item adds the type attr.
if (!AddInputToOp(fast_input_array[i], true, input_arg, op, status))
return null;

for (var j = 1; j < len; j++)
{
// Since the list is homogeneous, we don't need to re-add the attr.
if (!AddInputToOp(fast_input_array[j], false, input_arg, op, status))
return null;
}
}
}
else if (!string.IsNullOrEmpty(input_arg.TypeListAttr))
{

}
else
{
// The item is a single item.
AddInputToOp(args[i], true, input_arg, op, status);
}
}

int num_retvals = 0;
for (int i = 0; i < op_def.OutputArg.Count; i++)
{
var output_arg = op_def.OutputArg[i];
var delta = 1L;
if (!string.IsNullOrEmpty(output_arg.NumberAttr))
delta = attr_list_sizes[output_arg.NumberAttr];
else if (!string.IsNullOrEmpty(output_arg.TypeListAttr))
delta = attr_list_sizes[output_arg.TypeListAttr];
if(delta < 0)
throw new RuntimeError("Attributes suggest that the size of an output list is less than 0");
num_retvals += (int)delta;
}

var retVals = new IntPtr[num_retvals];
c_api.TFE_Execute(op, retVals, ref num_retvals, status.Handle);
status.Check(true);

return num_retvals == 0 ? null : new EagerTensor(retVals[0]);
}
}

private static TFE_Op GetOp(Context ctx, string op_or_function_name, Status status)
{
var maybe_op = ReleaseThreadLocalOp();
if (maybe_op != IntPtr.Zero)
{
c_api.TFE_OpReset(ctx, op_or_function_name, status.Handle, maybe_op);
}
else
{
maybe_op = c_api.TFE_NewOp(ctx, op_or_function_name, status.Handle);
op = maybe_op;
}

status.Check(true);
return maybe_op;
}

static TFE_Op op;
private static TFE_Op ReleaseThreadLocalOp()
{
return op;
}

/// <summary>
/// Adds input and type attr to the op, and to the list of flattened
/// inputs/attrs.
/// </summary>
/// <param name="inputs"></param>
/// <param name="add_type_attr"></param>
/// <param name="input_arg"></param>
/// <param name="op"></param>
/// <param name="status"></param>
/// <returns></returns>
private static bool AddInputToOp(object inputs,
bool add_type_attr,
ArgDef input_arg,
IntPtr op,
Status status)
{
TFE_TensorHandle input_handle;

// ConvertToTensor();
switch (inputs)
{
case EagerTensor input:
input_handle = (TFE_TensorHandle)input;
break;
case EagerTensor[] input_list:
input_handle = (TFE_TensorHandle)input_list[0];
break;
default:
throw new NotImplementedException("");
}

if(add_type_attr && !string.IsNullOrEmpty(input_arg.TypeAttr))
{
var dtype = c_api.TFE_TensorHandleDataType(input_handle);
c_api.TFE_OpSetAttrType(op, input_arg.TypeAttr, dtype);
}

c_api.TFE_OpAddInput(op, input_handle, status.Handle);
status.Check(true);

return true;
}

private static void SetOpAttrs(Context ctx, TFE_Op op, object[] attrs, int start_index, Status out_status)
public static void SetOpAttrs(TFE_Op op, params object[] attrs)
{ {
using var status = new Status();
var len = attrs.Length; var len = attrs.Length;
for (int i = 0; i < len; i += 2) for (int i = 0; i < len; i += 2)
{ {
var key = attrs[start_index + i].ToString();
var value = attrs[start_index + i + 1];
var key = attrs[i].ToString();
var value = attrs[i + 1];


byte is_list = 0;
var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, out_status.Handle);
if (!out_status.ok()) return;
byte is_list = 0;
var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle);
if (!status.ok()) return;
if (is_list != 0) if (is_list != 0)
SetOpAttrList(ctx, op, key, value, type, null, out_status);
SetOpAttrList(tf.context, op, key, value, type, null, status);
else else
SetOpAttrScalar(ctx, op, key, value, type, null, out_status);
out_status.Check(true);
SetOpAttrScalar(tf.context, op, key, value, type, null, status);
status.Check(true);
} }
} }
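A rough usage sketch for the simplified SetOpAttrs above (illustrative only; the op and attribute names are arbitrary, and tf.context is passed where the native context handle is expected, as elsewhere in these bindings): attributes are supplied as alternating name/value pairs and resolved against tf.context.

using var status = new Status();
var op = c_api.TFE_NewOp(tf.context, "MatMul", status.Handle);
status.Check(true);
wrap_tfe_src.SetOpAttrs(op, "transpose_a", false, "transpose_b", true);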




+ 11
- 11
src/TensorFlowNET.Core/Framework/meta_graph.cs View File

@@ -35,7 +35,7 @@ namespace Tensorflow
return meta_graph_def; return meta_graph_def;
} }


public static (Dictionary<string, VariableV1>, ITensorOrOperation[]) import_scoped_meta_graph_with_return_elements(MetaGraphDef meta_graph_or_file,
public static (Dictionary<string, IVariableV1>, ITensorOrOperation[]) import_scoped_meta_graph_with_return_elements(MetaGraphDef meta_graph_or_file,
bool clear_devices = false, bool clear_devices = false,
string import_scope = "", string import_scope = "",
Dictionary<string, Tensor> input_map = null, Dictionary<string, Tensor> input_map = null,
@@ -77,7 +77,7 @@ namespace Tensorflow
return_elements: return_elements); return_elements: return_elements);


// Restores all the other collections. // Restores all the other collections.
var variable_objects = new Dictionary<ByteString, VariableV1>();
var variable_objects = new Dictionary<ByteString, IVariableV1>();
foreach (var col in meta_graph_def.CollectionDef.OrderBy(x => x.Key)) foreach (var col in meta_graph_def.CollectionDef.OrderBy(x => x.Key))
{ {
// Don't add unbound_inputs to the new graph. // Don't add unbound_inputs to the new graph.
@@ -99,7 +99,7 @@ namespace Tensorflow
{ {
foreach (var value in col.Value.BytesList.Value) foreach (var value in col.Value.BytesList.Value)
{ {
VariableV1 variable = null;
IVariableV1 variable = null;
if (!variable_objects.ContainsKey(value)) if (!variable_objects.ContainsKey(value))
{ {
var proto = VariableDef.Parser.ParseFrom(value); var proto = VariableDef.Parser.ParseFrom(value);
@@ -147,10 +147,10 @@ namespace Tensorflow
} }
} }


var variables = graph.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES,
var variables = graph.get_collection<IVariableV1>(tf.GraphKeys.GLOBAL_VARIABLES,
scope: scope_to_prepend_to_names); scope: scope_to_prepend_to_names);
var var_list = new Dictionary<string, VariableV1>();
variables.ForEach(v => var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v);
var var_list = new Dictionary<string, IVariableV1>();
variables.ForEach(v => var_list[ops.strip_name_scope(v.Name, scope_to_prepend_to_names)] = v);


return (var_list, imported_return_elements); return (var_list, imported_return_elements);
} }
@@ -168,7 +168,7 @@ namespace Tensorflow
/// <param name="strip_default_attrs"></param> /// <param name="strip_default_attrs"></param>
/// <param name="meta_info_def"></param> /// <param name="meta_info_def"></param>
/// <returns></returns> /// <returns></returns>
public static (MetaGraphDef, Dictionary<string, VariableV1>) export_scoped_meta_graph(string filename = "",
public static (MetaGraphDef, Dictionary<string, IVariableV1>) export_scoped_meta_graph(string filename = "",
GraphDef graph_def = null, GraphDef graph_def = null,
bool as_text = false, bool as_text = false,
string unbound_inputs_col_name = "unbound_inputs", string unbound_inputs_col_name = "unbound_inputs",
@@ -180,14 +180,14 @@ namespace Tensorflow
{ {
var graph = ops.get_default_graph(); var graph = ops.get_default_graph();


var var_list = new Dictionary<string, VariableV1>();
var variables = graph.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES);
var var_list = new Dictionary<string, IVariableV1>();
var variables = graph.get_collection<IVariableV1>(tf.GraphKeys.GLOBAL_VARIABLES);


if (variables != null) if (variables != null)
{ {
foreach (var v in variables) foreach (var v in variables)
{ {
var_list[v.name] = v;
var_list[v.Name] = v;
} }
} }


@@ -268,7 +268,7 @@ namespace Tensorflow


switch (graph.get_collection(key)) switch (graph.get_collection(key))
{ {
case List<VariableV1> collection_list:
case List<IVariableV1> collection_list:
col_def.BytesList = new Types.BytesList(); col_def.BytesList = new Types.BytesList();
foreach (var x in collection_list) foreach (var x in collection_list)
{ {


+ 0
- 63
src/TensorFlowNET.Core/Gradients/GradientActor.cs View File

@@ -1,63 +0,0 @@
using System;
using System.Collections.Generic;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow.Gradients
{
/// <summary>
/// Record operations for automatic differentiation.
///
/// Operations are recorded if they are executed within this context manager and
/// at least one of their inputs is being "watched".
///
/// Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,
/// where `trainable=True` is default in both cases) are automatically watched.
/// Tensors can be manually watched by invoking the `watch` method on this context
/// manager.
/// </summary>
public class GradientActor : IDisposable
{
bool _recording;
bool _persistent;
bool _watch_accessed_variables;
bool _created_eagerly;
Tape _tape;
int tape_nesting_id_counter = 0;

public GradientActor(bool persistent = false,
bool watch_accessed_variables = true)
{
_persistent = persistent;
_watch_accessed_variables = watch_accessed_variables;
_created_eagerly = tf.context.executing_eagerly();
_push_tape();
}

private void _push_tape()
{
if (_recording)
throw new ValueError("Tape is still recording, This can happen if you try to " +
"re-enter an already-active tape.");

if (_tape == null)
{
_tape = new Tape();
_tape.tape = new GradientTape(_persistent, _watch_accessed_variables);
_tape.nesting_id = tape_nesting_id_counter++;
}

_recording = true;
}

public void watch(Tensor x)
{

}

public void Dispose()
{
}
}
}

+ 99
- 2
src/TensorFlowNET.Core/Gradients/GradientTape.cs View File

@@ -1,6 +1,10 @@
using System;
using Google.Protobuf.WellKnownTypes;
using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text; using System.Text;
using Tensorflow.Eager;
using static Tensorflow.Binding; using static Tensorflow.Binding;


namespace Tensorflow.Gradients namespace Tensorflow.Gradients
@@ -16,16 +20,109 @@ namespace Tensorflow.Gradients
/// Tensors can be manually watched by invoking the `watch` method on this context /// Tensors can be manually watched by invoking the `watch` method on this context
/// manager. /// manager.
/// </summary> /// </summary>
public class GradientTape
public class GradientTape : IDisposable
{ {
static bool _recording;
public static bool Recording => _recording;
bool _persistent; bool _persistent;
bool _watch_accessed_variables; bool _watch_accessed_variables;
ResourceVariable[] _watched_variables;
bool _created_eagerly;
Tape _tape;


public GradientTape(bool persistent = false, public GradientTape(bool persistent = false,
bool watch_accessed_variables = true) bool watch_accessed_variables = true)
{ {
_persistent = persistent; _persistent = persistent;
_watch_accessed_variables = watch_accessed_variables; _watch_accessed_variables = watch_accessed_variables;
_created_eagerly = tf.context.executing_eagerly();
_push_tape();
}

private void _push_tape()
{
if (_recording)
throw new ValueError("Tape is still recording, This can happen if you try to " +
"re-enter an already-active tape.");

if (_tape == null)
_tape = new Tape(_persistent, _watch_accessed_variables);
else
throw new NotImplementedException("");

_recording = true;
}

private void _pop_tape()
{
if (!_recording)
throw new ValueError("Tape is not recording.");
_tape.pop_tape(_tape);
_recording = false;
}

/// <summary>
/// Marks this tensor to be watched by the given tape.
/// </summary>
/// <param name="x"></param>
public void watch(Tensor x)
{
_tape.watch(x as EagerTensor);
}

public Tensor gradient(Tensor target, Tensor source)
{
if(_recording)
{
if (!_persistent)
_pop_tape();
}

var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_TapeGradient(_tape,
new [] { (target as EagerTensor).EagerTensorHandle }, 1,
new [] { (source as EagerTensor).EagerTensorHandle }, 1,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

public unsafe (Tensor, Tensor) gradient(Tensor target, (ResourceVariable, ResourceVariable) sources)
{
if (_recording)
{
if (!_persistent)
_pop_tape();
}

var results = new[] { new EagerTensor(), new EagerTensor() };
using Status status = new Status(c_api.TFE_TapeGradient(_tape,
new IntPtr[]
{
target as EagerTensor
}, 1,
new IntPtr[]
{
(sources.Item1.Handle as EagerTensor).EagerTensorHandle,
(sources.Item2.Handle as EagerTensor).EagerTensorHandle
}, 2,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);

if (!_persistent)
{
// Keep track of watched variables before setting tape to null
_watched_variables = _tape.watched_variables();
_tape = null;
}

return (results[0].Resolve(), results[1].Resolve());
}

public void Dispose()
{
if (_recording)
_pop_tape();
} }
} }
} }
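A short sketch of the intended eager usage of the reworked GradientTape, assuming x is an EagerTensor created under eager mode:

using (var tape = new GradientTape())
{
    tape.watch(x);                    // start recording operations on x
    var y = x * x;                    // the Mul op is recorded on the tape
    var dy_dx = tape.gradient(y, x);  // expected to evaluate to 2 * x
}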

+ 30
- 0
src/TensorFlowNET.Core/Gradients/RegisterGradientEager.cs View File

@@ -0,0 +1,30 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;

namespace Tensorflow.Gradients
{
public class RegisterGradientEager : Attribute
{
public string Name { get; set; }

public RegisterGradientEager(string name)
{
Name = name;
}
}
}
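The attribute is intended to be applied both to the class that groups eager gradient implementations and to each gradient method, keyed by op type, mirroring math_grad_eager.cs later in this diff. A hypothetical registration (usings for System and Tensorflow.Eager omitted):

[RegisterGradientEager("custom_grad")]
public class custom_grad_eager
{
    [RegisterGradientEager("Square")]
    public static EagerTensor[] _SquareGrad(EagerOperation op, IntPtr[] grads)
        => throw new NotImplementedException();
}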

+ 43
- 2
src/TensorFlowNET.Core/Gradients/Tape.cs View File

@@ -1,14 +1,48 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text; using System.Text;
using Tensorflow.Eager;


namespace Tensorflow.Gradients namespace Tensorflow.Gradients
{ {
public class Tape
public class Tape : DisposableObject
{ {
public GradientTape tape { get; set; }
public int nesting_id { get; set; } public int nesting_id { get; set; }


public Tape(bool persistent, bool watch_accessed_variables)
{
_handle = c_api.TFE_TapeSetNew(persistent, watch_accessed_variables);
}

public void watch(EagerTensor x)
{
c_api.TFE_TapeWatch(_handle, x.EagerTensorHandle);
}

public void pop_tape(Tape tape)
{
c_api.TFE_TapeSetRemove(tape);
}

public static void variable_accessed(ResourceVariable variable)
{
c_api.TFE_TapeVariableAccessed(variable);
}

public unsafe ResourceVariable[] watched_variables()
{
BindingArray result = c_api.TFE_TapeWatchedVariables(_handle);
var variables = result.Data.Select(x =>
{
var tensor = c_api.ResourceVariable_Handle(x);
return new ResourceVariable(x, tensor);
}).ToArray();

return variables;
}

public static bool IsDtypeTrainable(DataType dtype) public static bool IsDtypeTrainable(DataType dtype)
{ {
switch (dtype) switch (dtype)
@@ -26,5 +60,12 @@ namespace Tensorflow.Gradients
return false; return false;
} }
} }

protected override void DisposeUnmanagedResources(IntPtr handle)
{
}

public static implicit operator IntPtr(Tape tape)
=> tape._handle;
} }
} }
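Because Tape now wraps the native tape handle and exposes an implicit IntPtr conversion, instances can be handed straight to the c_api tape functions, as GradientTape.gradient does above. A brief sketch, with x assumed to be an EagerTensor:

var tape = new Tape(persistent: false, watch_accessed_variables: true);
tape.watch(x);                  // forwards to TFE_TapeWatch
// ... run eager ops here ...
c_api.TFE_TapeSetRemove(tape);  // relies on the implicit Tape -> IntPtr conversion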

+ 1
- 1
src/TensorFlowNET.Core/Gradients/control_flow_grad.cs View File

@@ -191,7 +191,7 @@ namespace Tensorflow.Gradients


grad_ctxt.Enter(); grad_ctxt.Enter();
var result = control_flow_ops._Enter( var result = control_flow_ops._Enter(
grad, grad_ctxt.name, is_constant: false,
grad, grad_ctxt.Name, is_constant: false,
parallel_iterations: grad_ctxt.parallel_iterations, parallel_iterations: grad_ctxt.parallel_iterations,
name: "b_exit"); name: "b_exit");




+ 100
- 16
src/TensorFlowNET.Core/Gradients/math_grad.cs View File

@@ -14,8 +14,10 @@
limitations under the License. limitations under the License.
******************************************************************************/ ******************************************************************************/


using NumSharp;
using System; using System;
using System.Linq; using System.Linq;
using Tensorflow.Eager;
using Tensorflow.Operations; using Tensorflow.Operations;
using static Tensorflow.Binding; using static Tensorflow.Binding;


@@ -168,10 +170,28 @@ namespace Tensorflow.Gradients
var x = op.inputs[0]; var x = op.inputs[0];
var y = op.inputs[1]; var y = op.inputs[1];
var grad = grads[0]; var grad = grads[0];
if (grad is Tensor &&

if (op is EagerOperation op_eager &&
op_eager.SkipInputIndices.Contains(1) &&
y.NDims == 0)
{
return new Tensor[]
{
gen_math_ops.mul(grad, math_ops.conj(y)),
null
};
}

if (grad is Tensor &&
_ShapesFullySpecifiedAndEqual(x, y, grad) && _ShapesFullySpecifiedAndEqual(x, y, grad) &&
new TF_DataType[] { tf.int32, tf.float32 }.Contains(grad.dtype)) new TF_DataType[] { tf.int32, tf.float32 }.Contains(grad.dtype))
return new Tensor[] { gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) };
{
return new Tensor[]
{
gen_math_ops.mul(grad, y),
gen_math_ops.mul(grad, x)
};
}


var (sx, sy) = SmartBroadcastGradientArgs(x, y); var (sx, sy) = SmartBroadcastGradientArgs(x, y);
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
@@ -179,15 +199,39 @@ namespace Tensorflow.Gradients
x = math_ops.conj(x); x = math_ops.conj(x);
y = math_ops.conj(y); y = math_ops.conj(y);


var mul1 = gen_math_ops.mul(grad, y);
var reduce_sum1 = math_ops.reduce_sum(mul1, rx);
var reshape1 = gen_array_ops.reshape(reduce_sum1, sx);
Tensor gx = null, gy = null;


var mul2 = gen_math_ops.mul(x, grad);
var reduce_sum2 = math_ops.reduce_sum(mul2, ry);
var reshape2 = gen_array_ops.reshape(reduce_sum2, sy);
if (op is EagerOperation op_eager1 &&
op_eager1.SkipInputIndices.Contains(0))
{
return new Tensor[]
{
gen_math_ops.mul(grad, math_ops.conj(y)),
null
};
}
// else if not must_reduce_x:
// gx = gen_math_ops.mul(grad, y)
else
{
gx = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx);
}


return new Tensor[] { reshape1, reshape2 };
if (op is EagerOperation op_eager2 &&
op_eager2.SkipInputIndices.Contains(1))
{

}
// else if not must_reduce_y:
// gy = gen_math_ops.mul(x, grad)
else
{
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy);
}

return new Tensor[] { gx, gy };
} }


[RegisterGradient("MatMul")] [RegisterGradient("MatMul")]
@@ -266,11 +310,23 @@ namespace Tensorflow.Gradients
var input_shape = op.inputs[0]._shape_tuple(); var input_shape = op.inputs[0]._shape_tuple();
var output_shape = op.outputs[0]._shape_tuple(); var output_shape = op.outputs[0]._shape_tuple();


var input_shape_tensor = array_ops.shape(op.inputs[0]);
var output_shape_tensor = array_ops.shape(op.outputs[0]);
var factor = _safe_shape_div(math_ops.reduce_prod(input_shape_tensor), math_ops.reduce_prod(output_shape_tensor));
if(input_shape != null &&
output_shape != null)
{
var input_size = np.prod(input_shape);
var output_size = np.prod(output_shape);
var factor = (int)input_size / Math.Max((int)output_size, 1);
var factor_tensor = constant_op.constant(factor, dtype: sum_grad.dtype);
return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor_tensor, sum_grad.dtype)), null };
}
else
{
var input_shape_tensor = array_ops.shape(op.inputs[0]);
var output_shape_tensor = array_ops.shape(op.outputs[0]);
var factor = _safe_shape_div(math_ops.reduce_prod(input_shape_tensor), math_ops.reduce_prod(output_shape_tensor));


return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), null };
return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), null };
}
} }


/// <summary> /// <summary>
@@ -438,8 +494,18 @@ namespace Tensorflow.Gradients
var rank = input_0_shape.Length; var rank = input_0_shape.Length;
if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>())) if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>()))
{ {
var new_shape = range(rank).Select(x => 1).ToArray();
grad = array_ops.reshape(grad, new_shape);
if (tf.context.executing_eagerly())
{
// should add ones_rank_cache
var new_shape_tensor = constant_op.constant(range(rank).Select(x => 1).ToArray(), dtype: TF_DataType.TF_INT32);
grad = array_ops.reshape(grad, new_shape_tensor);
}
else
{
var new_shape = range(rank).Select(x => 1).ToArray();
grad = array_ops.reshape(grad, new_shape);
}

// If shape is not fully defined (but rank is), we use Shape. // If shape is not fully defined (but rank is), we use Shape.
if (!input_0_shape.Contains(-1)) if (!input_0_shape.Contains(-1))
input_shape = constant_op.constant(input_0_shape); input_shape = constant_op.constant(input_0_shape);
@@ -447,7 +513,11 @@ namespace Tensorflow.Gradients
input_shape = array_ops.shape(op.inputs[0]); input_shape = array_ops.shape(op.inputs[0]);
return new Tensor[] { gen_array_ops.tile(grad, input_shape), null }; return new Tensor[] { gen_array_ops.tile(grad, input_shape), null };
} }
}
else
{

}
}
} }


input_shape = array_ops.shape(op.inputs[0]); input_shape = array_ops.shape(op.inputs[0]);
@@ -605,6 +675,20 @@ namespace Tensorflow.Gradients
var grad = grads[0]; var grad = grads[0];
var x = op.inputs[0]; var x = op.inputs[0];
var y = op.inputs[1]; var y = op.inputs[1];

if (op is EagerOperation op_eager &&
op_eager.SkipInputIndices.Contains(1) &&
y.NDims == 0)
{
x = math_ops.conj(x);
y = math_ops.conj(y);
return new Tensor[]
{
grad * y * math_ops.pow(x, y - 1),
null
};
}

var z = op.outputs[0]; var z = op.outputs[0];


var (sx, sy) = SmartBroadcastGradientArgs(x, y); var (sx, sy) = SmartBroadcastGradientArgs(x, y);


+ 74
- 0
src/TensorFlowNET.Core/Gradients/math_grad_eager.cs View File

@@ -0,0 +1,74 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Linq;
using Tensorflow.Eager;
using Tensorflow.Operations;
using static Tensorflow.Binding;

namespace Tensorflow.Gradients
{
/// <summary>
/// Gradients for operators defined in math_ops.py.
/// </summary>
[RegisterGradientEager("math_grad")]
public class math_grad_eager
{
[RegisterGradientEager("Mul")]
public static EagerTensor[] _MulGrad(EagerOperation op, IntPtr[] grads)
{
var x = op.InputHandles[0];
var y = op.InputHandles[1];
var grad = grads[0];

if (op.SkipInputIndices.Contains(1) &&
EagerTensor.GetRank(grad) == 0)
{
return new EagerTensor[]
{
null,//gen_math_ops.mul(grad, math_ops.conj(y)),
null
};
}

if (_ShapesFullySpecifiedAndEqual(x, y, grad))
{
return new EagerTensor[]
{
gen_math_ops.mul(grad, y),
gen_math_ops.mul(grad, x)
};
}

throw new NotImplementedException("");
}

public static bool _ShapesFullySpecifiedAndEqual(IntPtr x, IntPtr y, IntPtr grad)
{
var x_shape = EagerTensor.GetDims(x);
var y_shape = EagerTensor.GetDims(y);

var grad_shape = EagerTensor.GetDims(grad);
return x_shape != null &&
y_shape != null &&
Enumerable.SequenceEqual(x_shape, y_shape) &&
Enumerable.SequenceEqual(y_shape, grad_shape) &&
!x_shape.Contains(-1);
}
}
}

+ 2
- 2
src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs View File

@@ -24,9 +24,9 @@ namespace Tensorflow
{ {
public partial class ops public partial class ops
{ {
static Dictionary<string, Func<Operation, Tensor[], Tensor[]>> gradientFunctions = null;
public static Dictionary<string, Func<Operation, Tensor[], Tensor[]>> gradientFunctions = null;


private static void RegisterFromAssembly()
public static void RegisterFromAssembly()
{ {
if (gradientFunctions == null) if (gradientFunctions == null)
{ {


+ 101
- 0
src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping_eager.cs View File

@@ -0,0 +1,101 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using Tensorflow.Eager;
using Tensorflow.Gradients;

namespace Tensorflow
{
public partial class ops
{
public static Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>> gradientFunctionsEager = null;

public static void RegisterFromAssemblyEager()
{
if (gradientFunctionsEager == null)
{
gradientFunctionsEager = new Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>>();

var gradGroups = Assembly.GetExecutingAssembly()
.GetTypes()
.Where(x => x.GetCustomAttribute<RegisterGradientEager>() != null)
.ToArray();

foreach (var g in gradGroups)
{
var methods = g.GetMethods()
.Where(x => x.GetCustomAttribute<RegisterGradientEager>() != null)
.ToArray();

foreach (var m in methods)
{
RegisterGradientFunctionEager(m.GetCustomAttribute<RegisterGradientEager>().Name,
(oper, out_grads) =>
g.InvokeMember(m.Name,
BindingFlags.InvokeMethod,
null,
null,
args: new object[] { oper, out_grads }) as EagerTensor[]
);
}

// REGISTER_NO_GRADIENT_OP
methods = g.GetMethods()
.Where(x => x.GetCustomAttribute<RegisterNoGradient>() != null)
.ToArray();

foreach (var m in methods)
RegisterNoGradientFunctionEager(m.GetCustomAttribute<RegisterNoGradient>().Name);
}
}
}

/// <summary>
/// Register a new eager gradient function
/// </summary>
/// <param name="name">operation type</param>
/// <param name="func">function delegate</param>
public static void RegisterGradientFunctionEager(string name, Func<EagerOperation, IntPtr[], EagerTensor[]> func)
{
RegisterFromAssemblyEager();

gradientFunctionsEager[name] = func;
}

public static void RegisterNoGradientFunctionEager(string name)
{
RegisterFromAssemblyEager();

gradientFunctionsEager[name] = null;
}

public static Func<EagerOperation, IntPtr[], EagerTensor[]> get_gradient_function_eager(EagerOperation op)
{
if (op.inputs == null) return null;

RegisterFromAssemblyEager();

if (!gradientFunctionsEager.ContainsKey(op.type))
throw new LookupError($"can't get graident function through get_gradient_function {op.type}");

return gradientFunctionsEager[op.type];
}
}
}
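Besides attribute-based discovery, eager gradients can also be registered and looked up directly through the public dictionary. A hedged sketch with a made-up op type:

ops.RegisterGradientFunctionEager("MyCustomOp",
    (op, out_grads) => new EagerTensor[] { null });

// Later, during backprop, the recorded EagerOperation and upstream gradient
// handles would be passed to the registered delegate:
if (ops.gradientFunctionsEager.TryGetValue("MyCustomOp", out var grad_fn) && grad_fn != null)
{
    // var grads = grad_fn(eager_op, output_grad_handles);
}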

+ 32
- 0
src/TensorFlowNET.Core/Gradients/resource_variable_grad.cs View File

@@ -0,0 +1,32 @@
/*****************************************************************************
Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Gradients
{
[RegisterGradient("resource_variable_grad")]
public class resource_variable_grad
{
[RegisterGradient("ReadVariableOp")]
public static Tensor[] _ReadGrad(Operation op, Tensor[] grads)
{
return new Tensor[] { grads[0] };
}
}
}

+ 1
- 1
src/TensorFlowNET.Core/Graphs/Graph.cs View File

@@ -444,7 +444,7 @@ namespace Tensorflow
var collection = _collections.ContainsKey(name) ? _collections[name] : new List<T>(); var collection = _collections.ContainsKey(name) ? _collections[name] : new List<T>();
switch (collection) switch (collection)
{ {
case List<VariableV1> list:
case List<IVariableV1> list:
t = list.Select(x => (T)(object)x).ToList(); t = list.Select(x => (T)(object)x).ToList();
break; break;
case List<ResourceVariable> list: case List<ResourceVariable> list:


+ 2
- 2
src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs View File

@@ -37,8 +37,8 @@ namespace Tensorflow.Keras.Layers
private IInitializer gamma_initializer; private IInitializer gamma_initializer;
private IInitializer moving_mean_initializer; private IInitializer moving_mean_initializer;
private IInitializer moving_variance_initializer; private IInitializer moving_variance_initializer;
private VariableV1 gamma;
private VariableV1 beta;
private IVariableV1 gamma;
private IVariableV1 beta;
private RefVariable moving_mean; private RefVariable moving_mean;
private RefVariable moving_variance; private RefVariable moving_variance;




+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Embedding.cs View File

@@ -23,7 +23,7 @@ namespace Tensorflow.Keras.Layers
private int input_dim; private int input_dim;
private int output_dim; private int output_dim;
private bool mask_zero; private bool mask_zero;
public VariableV1 embeddings;
public IVariableV1 embeddings;
public IInitializer embeddings_initializer; public IInitializer embeddings_initializer;
int input_length; int input_length;




+ 6
- 6
src/TensorFlowNET.Core/Keras/Layers/Layer.cs View File

@@ -51,8 +51,8 @@ namespace Tensorflow.Keras.Layers
/// </summary> /// </summary>
protected InputSpec input_spec; protected InputSpec input_spec;
protected bool supports_masking; protected bool supports_masking;
protected List<VariableV1> _trainable_weights;
protected List<VariableV1> _non_trainable_weights;
protected List<IVariableV1> _trainable_weights;
protected List<IVariableV1> _non_trainable_weights;
private string _name; private string _name;
public string name => _name; public string name => _name;
protected string _base_name; protected string _base_name;
@@ -84,8 +84,8 @@ namespace Tensorflow.Keras.Layers
this.supports_masking = false; this.supports_masking = false;


_init_set_name(name); _init_set_name(name);
_trainable_weights = new List<VariableV1>();
_non_trainable_weights = new List<VariableV1>();
_trainable_weights = new List<IVariableV1>();
_non_trainable_weights = new List<IVariableV1>();
_compute_previous_mask = false; _compute_previous_mask = false;
_updates = new List<Operation>(); _updates = new List<Operation>();


@@ -207,12 +207,12 @@ namespace Tensorflow.Keras.Layers
built = true; built = true;
} }


protected virtual VariableV1 add_weight(string name,
protected virtual IVariableV1 add_weight(string name,
int[] shape, int[] shape,
TF_DataType dtype = TF_DataType.DtInvalid, TF_DataType dtype = TF_DataType.DtInvalid,
IInitializer initializer = null, IInitializer initializer = null,
bool? trainable = null, bool? trainable = null,
Func<string, int[], TF_DataType, IInitializer, bool, VariableV1> getter = null)
Func<string, int[], TF_DataType, IInitializer, bool, IVariableV1> getter = null)
{ {
if (dtype == TF_DataType.DtInvalid) if (dtype == TF_DataType.DtInvalid)
dtype = TF_DataType.TF_FLOAT; dtype = TF_DataType.TF_FLOAT;


+ 25
- 0
src/TensorFlowNET.Core/Keras/Optimizers/DeviceDType.cs View File

@@ -0,0 +1,25 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Optimizers
{
public class DeviceDType : IEqualityComparer<DeviceDType>
{
public string Device { get; set; }
public TF_DataType DType { get; set; }

public bool Equals(DeviceDType x, DeviceDType y)
{
return x.ToString() == y.ToString();
}

public int GetHashCode(DeviceDType obj)
{
return 0;
}

public override string ToString()
=> $"{Device}, {DType}";
}
}
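DeviceDType doubles as its own IEqualityComparer so variables can be grouped by (device, dtype), which is exactly how OptimizerV2._prepare uses it below. Note that returning 0 from GetHashCode is correct but forces every comparison through Equals, trading hashing efficiency for simplicity.

// var_list: ResourceVariable[]
var keys = var_list
    .Select(v => new DeviceDType { Device = v.Device, DType = v.dtype.as_base_dtype() })
    .Distinct(new DeviceDType())
    .ToArray();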

+ 209
- 0
src/TensorFlowNET.Core/Keras/Optimizers/OptimizerV2.cs View File

@@ -1,7 +1,12 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
using System.Text; using System.Text;
using Tensorflow.Keras.Utils;
using Tensorflow.Train; using Tensorflow.Train;
using static Tensorflow.Binding;
using Tensorflow;
using Tensorflow.Eager;


namespace Tensorflow.Keras.Optimizers namespace Tensorflow.Keras.Optimizers
{ {
@@ -10,5 +15,209 @@ namespace Tensorflow.Keras.Optimizers
/// </summary> /// </summary>
public class OptimizerV2 : Trackable, IOptimizer public class OptimizerV2 : Trackable, IOptimizer
{ {
protected bool _hypers_created;
protected virtual string _name { get; }

ResourceVariable _iterations;
List<ResourceVariable> _weight;
Dictionary<string, float> _hyper;
Dictionary<string, ResourceVariable> _hyper_variables;
protected bool _momentum;
protected float _initial_decay = 0.0f;
protected bool _use_locking = true;

Dictionary<DeviceDType, Dictionary<string, Tensor>> apply_state;

public OptimizerV2() : base()
{
_weight = new List<ResourceVariable>();
_hyper = new Dictionary<string, float>();
_hyper_variables = new Dictionary<string, ResourceVariable>();
apply_state = new Dictionary<DeviceDType, Dictionary<string, Tensor>>();
}

/// <summary>
/// Apply gradients to variables.
/// </summary>
/// <param name="grads_and_vars"></param>
/// <param name="name"></param>
/// <param name="experimental_aggregate_gradients"></param>
public void apply_gradients(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars,
string name = null,
bool experimental_aggregate_gradients = true)
{
var var_list = grads_and_vars.Select(x => x.Item2).ToArray();
tf_with(ops.name_scope(_name), delegate
{
ops.init_scope();
_create_all_weights(var_list);
if (grads_and_vars == null || grads_and_vars.Count() == 0)
return control_flow_ops.no_op();

apply_state = _prepare(var_list);
if(experimental_aggregate_gradients)
{
// var reduced_grads = _aggregate_gradients(grads_and_vars);
_distributed_apply(grads_and_vars, name, apply_state);
}

return null;
});
}

void apply_grad_to_update_var(ResourceVariable var, EagerTensor grad)
{
_resource_apply_dense(var, grad, apply_state);
}

protected virtual Operation _resource_apply_dense(ResourceVariable var,
EagerTensor grad,
Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
{
throw new NotImplementedException("_resource_apply_dense");
}

void _distributed_apply(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars,
string name,
Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
{
tf_with(ops.name_scope(name, "", new { skip_on_eager = true }), delegate
{
foreach(var (grad, var) in grads_and_vars)
{
tf_with(ops.name_scope("update"), delegate
{
apply_grad_to_update_var(var, grad as EagerTensor);
});
}

_iterations.assign_add(ops.convert_to_tensor(1, dtype: _iterations.dtype));
});
}

Tensor[] _aggregate_gradients(IEnumerable<(Tensor, ResourceVariable)> grads_and_vars)
{
return grads_and_vars.Select(x => x.Item1).ToArray();
}

Dictionary<DeviceDType, Dictionary<string, Tensor>> _prepare(ResourceVariable[] var_list)
{
var _apply_state = new Dictionary<DeviceDType, Dictionary<string, Tensor>>();
var keys = var_list.Select(x => new DeviceDType
{
Device = x.Device,
DType = x.dtype.as_base_dtype()
}).Distinct(new DeviceDType()).ToArray();

foreach(var device_dtype in keys)
{
_apply_state[device_dtype] = new Dictionary<string, Tensor>();
_prepare_local(device_dtype, _apply_state);
}

return _apply_state;
}

protected virtual void _prepare_local(DeviceDType device_dtype,
Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
{
if (_hyper.ContainsKey("learning_rate"))
{
var lr_t = array_ops.identity(_decayed_lr(device_dtype.DType));
_apply_state[device_dtype]["lr_t"] = lr_t;
}
}

Tensor _decayed_lr(TF_DataType var_dtype)
{
var lr_t = _get_hyper("learning_rate", var_dtype);
if(_initial_decay > 0.0f)
{
throw new NotImplementedException("");
}
return lr_t;
}

protected ResourceVariable _get_hyper(string name, TF_DataType dtype = TF_DataType.DtInvalid)
{
var value = _hyper_variables[name];
return math_ops.cast(value, dtype);
}

void _create_all_weights(ResourceVariable[] var_list)
{
if(_iterations == null)
{
_iterations = add_weight("iter",
shape: new int[0],
dtype: TF_DataType.TF_INT64,
trainable: false,
aggregation: VariableAggregation.OnlyFirstReplica);
_weight.Add(_iterations);
}

_create_hypers();
_create_slots(var_list);
}

protected void _set_hyper(string name, float value)
{
_hyper[name] = value;
}

void _create_hypers()
{
if (_hypers_created)
return;
foreach (var dict in _hyper)
{
var name = dict.Key;
var value = dict.Value;
_hyper_variables[name] = add_weight(
name,
shape: new int[0],
trainable: false,
initializer: tf.constant_initializer(value),
aggregation: VariableAggregation.OnlyFirstReplica);
}
_hypers_created = true;
}

void _create_slots(ResourceVariable[] var_list)
{
if(_momentum)
{
/*for var in var_list:
self.add_slot(var, "momentum")*/
}
}

ResourceVariable add_weight(string name,
TensorShape shape,
TF_DataType dtype = TF_DataType.TF_FLOAT,
IInitializer initializer = null,
bool trainable = false,
VariableSynchronization synchronization = VariableSynchronization.Auto,
VariableAggregation aggregation = VariableAggregation.None)
{
if (initializer == null)
initializer = tf.zeros_initializer;

if (dtype == TF_DataType.DtInvalid)
dtype = TF_DataType.TF_FLOAT;

var variable = _add_variable_with_custom_getter(name: name,
shape: shape,
getter: base_layer_utils.make_variable,
dtype: dtype,
overwrite: true,
initializer: initializer,
trainable: trainable,
use_resource: true,
synchronization: synchronization,
aggregation: aggregation);

return variable as ResourceVariable;
}
} }
} }
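Putting the pieces together, a sketch of a single eager training step with the new optimizer surface; w and b are assumed to be ResourceVariables and compute_loss is a hypothetical function returning a scalar loss Tensor:

var optimizer = new SGD(learning_rate: 0.01f);

using (var tape = new GradientTape())
{
    Tensor loss = compute_loss(w, b);                    // hypothetical loss
    var (grad_w, grad_b) = tape.gradient(loss, (w, b));  // gradients w.r.t. both variables
    optimizer.apply_gradients(new[] { (grad_w, w), (grad_b, b) });
}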

+ 41
- 2
src/TensorFlowNET.Core/Keras/Optimizers/SGD.cs View File

@@ -1,14 +1,53 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
using System.Text; using System.Text;
using Tensorflow.Eager;


namespace Tensorflow.Keras.Optimizers namespace Tensorflow.Keras.Optimizers
{ {
public class SGD
public class SGD : OptimizerV2
{ {
public SGD(float learning_rate)
protected override string _name => "SGD";
bool nesterov;

public SGD(float learning_rate,
float momentum = 0.0f,
bool nesterov = false,
float decay = 0.0f) : base()
{
_set_hyper("learning_rate", learning_rate);
_set_hyper("decay", decay);

_momentum = momentum > 0;

_set_hyper("momentum", momentum);

this.nesterov = nesterov;
}

protected override void _prepare_local(DeviceDType device_dtype,
Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
{
base._prepare_local(device_dtype, _apply_state);

_apply_state[device_dtype]["momentum"] = array_ops.identity(
_get_hyper("momentum", device_dtype.DType));
}

protected override Operation _resource_apply_dense(ResourceVariable var, EagerTensor grad, Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
{ {
if (_momentum)
{
throw new NotImplementedException("_resource_apply_dense");
}
var device_dtype = _apply_state.Keys.FirstOrDefault(x => x.Device == var.Device && x.DType == var.dtype.as_base_dtype());


return gen_training_ops.resource_apply_gradient_descent(var.Handle as EagerTensor,
_apply_state[device_dtype]["lr_t"] as EagerTensor,
grad,
use_locking: _use_locking);
} }
} }
} }

+ 2
- 2
src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs View File

@@ -32,7 +32,7 @@ namespace Tensorflow.Keras.Utils
/// <param name="initializer"></param> /// <param name="initializer"></param>
/// <param name="trainable"></param> /// <param name="trainable"></param>
/// <returns></returns> /// <returns></returns>
public static VariableV1 make_variable(string name,
public static IVariableV1 make_variable(string name,
int[] shape, int[] shape,
TF_DataType dtype = TF_DataType.TF_FLOAT, TF_DataType dtype = TF_DataType.TF_FLOAT,
IInitializer initializer = null, IInitializer initializer = null,
@@ -46,7 +46,7 @@ namespace Tensorflow.Keras.Utils
Func<Tensor> init_val = () => initializer.call(new TensorShape(shape), dtype: dtype); Func<Tensor> init_val = () => initializer.call(new TensorShape(shape), dtype: dtype);


var variable_dtype = dtype.as_base_dtype(); var variable_dtype = dtype.as_base_dtype();
var v = tf.Variable(init_val,
var v = tf.Variable(init_val,
dtype: dtype, dtype: dtype,
shape: shape, shape: shape,
name: name); name: name);


+ 3
- 3
src/TensorFlowNET.Core/Keras/backend.cs View File

@@ -42,14 +42,14 @@ namespace Tensorflow.Keras
/// Allows to give unique autogenerated names to layers, in a graph-specific way. /// Allows to give unique autogenerated names to layers, in a graph-specific way.
/// </summary> /// </summary>
public static Dictionary<Graph, Dictionary<(string, string), int>> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); public static Dictionary<Graph, Dictionary<(string, string), int>> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>();
public static Dictionary<string, VariableV1> _GRAPH_VARIABLES = new Dictionary<string, VariableV1>();
public static Dictionary<string, IVariableV1> _GRAPH_VARIABLES = new Dictionary<string, IVariableV1>();
public static Dictionary<string, Optimizer> _GRAPH_TF_OPTIMIZERS = new Dictionary<string, Optimizer>(); public static Dictionary<string, Optimizer> _GRAPH_TF_OPTIMIZERS = new Dictionary<string, Optimizer>();


public static _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph(); public static _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph();


public static void track_variable(VariableV1 v)
public static void track_variable(IVariableV1 v)
{ {
var graph = v.graph;
var graph = v.Graph;
_GRAPH_VARIABLES[graph.graph_key] = v; _GRAPH_VARIABLES[graph.graph_key] = v;
} }




+ 4
- 4
src/TensorFlowNET.Core/Layers/Layer.cs View File

@@ -42,8 +42,8 @@ namespace Tensorflow.Layers
this._reuse = _reuse; this._reuse = _reuse;


// Avoid an incorrect lint error // Avoid an incorrect lint error
_trainable_weights = new List<VariableV1>();
_non_trainable_weights = new List<VariableV1>();
_trainable_weights = new List<IVariableV1>();
_non_trainable_weights = new List<IVariableV1>();
this.built = false; this.built = false;
_keras_style = false; _keras_style = false;
} }
@@ -116,7 +116,7 @@ namespace Tensorflow.Layers
/// <param name="synchronization"></param> /// <param name="synchronization"></param>
/// <param name="aggregation"></param> /// <param name="aggregation"></param>
/// <returns></returns> /// <returns></returns>
protected virtual VariableV1 add_weight(string name,
protected virtual IVariableV1 add_weight(string name,
int[] shape, int[] shape,
TF_DataType dtype = TF_DataType.DtInvalid, TF_DataType dtype = TF_DataType.DtInvalid,
IInitializer initializer = null, IInitializer initializer = null,
@@ -126,7 +126,7 @@ namespace Tensorflow.Layers
{ {
var default_graph = ops.get_default_graph(); var default_graph = ops.get_default_graph();
Graph init_graph = null; Graph init_graph = null;
VariableV1[] existing_variables = null;
IVariableV1[] existing_variables = null;


if (synchronization == VariableSynchronization.OnRead) if (synchronization == VariableSynchronization.OnRead)
trainable = false; trainable = false;


+ 1
- 1
src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowContext.cs View File

@@ -77,7 +77,7 @@ namespace Tensorflow.Operations
_external_values = new Dictionary<string, ITensorOrOperation>(); _external_values = new Dictionary<string, ITensorOrOperation>();
} }


public string name { get => _name; }
public string Name { get => _name; }
protected string _name; protected string _name;


public void __init__(ValuesDef values_def = null, string import_scope = null) public void __init__(ValuesDef values_def = null, string import_scope = null)


+ 1
- 1
src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs View File

@@ -141,7 +141,7 @@ namespace Tensorflow.Operations.ControlFlows
parallel_iterations: forward_ctxt.parallel_iterations, parallel_iterations: forward_ctxt.parallel_iterations,
back_prop: forward_ctxt.back_prop, back_prop: forward_ctxt.back_prop,
swap_memory: forward_ctxt.swap_memory, swap_memory: forward_ctxt.swap_memory,
name: forward_ctxt.name,
name: forward_ctxt.Name,
grad_state: this); grad_state: this);
_grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state); _grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state);
if (outer_forward_ctxt != null) if (outer_forward_ctxt != null)


+ 2
- 2
src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs View File

@@ -21,8 +21,8 @@ namespace Tensorflow
bool _state_is_tuple; bool _state_is_tuple;
IActivation _activation; IActivation _activation;
LSTMStateTuple _state; LSTMStateTuple _state;
VariableV1 _kernel;
VariableV1 _bias;
IVariableV1 _kernel;
IVariableV1 _bias;
string _WEIGHTS_VARIABLE_NAME = "kernel"; string _WEIGHTS_VARIABLE_NAME = "kernel";
string _BIAS_VARIABLE_NAME = "bias"; string _BIAS_VARIABLE_NAME = "bias";




+ 2
- 2
src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs View File

@@ -28,9 +28,9 @@ namespace Tensorflow


public override object state_size => _num_units; public override object state_size => _num_units;
public override int output_size => _num_units; public override int output_size => _num_units;
public VariableV1 _kernel;
public IVariableV1 _kernel;
string _WEIGHTS_VARIABLE_NAME = "kernel"; string _WEIGHTS_VARIABLE_NAME = "kernel";
public VariableV1 _bias;
public IVariableV1 _bias;
string _BIAS_VARIABLE_NAME = "bias"; string _BIAS_VARIABLE_NAME = "bias";


public BasicRnnCell(int num_units, public BasicRnnCell(int num_units,


+ 20
- 8
src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs View File

@@ -14,6 +14,8 @@
limitations under the License. limitations under the License.
******************************************************************************/ ******************************************************************************/


using System;
using System.Linq;
using Tensorflow.Eager; using Tensorflow.Eager;
using static Tensorflow.Binding; using static Tensorflow.Binding;


@@ -466,10 +468,15 @@ namespace Tensorflow.Operations
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Relu", name, null,
features);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Relu", name, new IntPtr[]
{
features as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features }); var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features });
@@ -480,10 +487,15 @@ namespace Tensorflow.Operations
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Tanh", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Tanh", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Tanh", name: name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Tanh", name: name, args: new { x });


+ 2
- 2
src/TensorFlowNET.Core/Operations/Operation.Input.cs View File

@@ -40,9 +40,9 @@ namespace Tensorflow
public int NumInputs => c_api.TF_OperationNumInputs(_handle); public int NumInputs => c_api.TF_OperationNumInputs(_handle);
private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray();


private InputList _inputs_val;
protected InputList _inputs_val;


public InputList inputs
public virtual InputList inputs
{ {
get get
{ {


+ 2
- 2
src/TensorFlowNET.Core/Operations/Operation.Output.cs View File

@@ -38,8 +38,8 @@ namespace Tensorflow
return num; return num;
} }


private Tensor[] _outputs;
public Tensor[] outputs => _outputs;
protected Tensor[] _outputs;
public virtual Tensor[] outputs => _outputs;
public Tensor output => _outputs.FirstOrDefault(); public Tensor output => _outputs.FirstOrDefault();


public int NumControlOutputs => c_api.TF_OperationNumControlOutputs(_handle); public int NumControlOutputs => c_api.TF_OperationNumControlOutputs(_handle);


+ 1
- 0
src/TensorFlowNET.Core/Operations/Operation.cs View File

@@ -64,6 +64,7 @@ namespace Tensorflow


bool _is_stateful; bool _is_stateful;



public NodeDef node_def public NodeDef node_def
{ {
get get


+ 26
- 5
src/TensorFlowNET.Core/Operations/array_ops.cs View File

@@ -226,6 +226,21 @@ namespace Tensorflow
private static Tensor expand_dims_v2(Tensor input, int axis, string name = null) private static Tensor expand_dims_v2(Tensor input, int axis, string name = null)
=> gen_array_ops.expand_dims(input, axis, name); => gen_array_ops.expand_dims(input, axis, name);


/// <summary>
/// Creates a tensor filled with a scalar value.
/// This operation creates a tensor of shape `dims` and fills it with `value`.
/// </summary>
/// <param name="dims">A 1-D sequence of non-negative numbers.</param>
/// <param name="value">A value to fill the returned `tf.Tensor`.</param>
/// <param name="name">Optional string. The name of the output `tf.Tensor`.</param>
/// <returns>A `tf.Tensor` with shape `dims` and the same dtype as `value`.</returns>
public static Tensor fill(Tensor dims, Tensor value, string name = null)
{
var result = gen_array_ops.fill(dims, value, name: name);
// tensor_util.maybe_set_static_shape(result, dims)
return result;
}

/// <summary> /// <summary>
/// Returns the rank of a tensor. /// Returns the rank of a tensor.
/// </summary> /// </summary>
@@ -312,20 +327,26 @@ namespace Tensorflow
}); });
} }


public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
=> tf_with(ops.name_scope(name, "ones", new { dims }), scope =>
public static Tensor ones(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
=> tf_with(ops.name_scope(name, "ones", shape), scope =>
{ {
dtype = dtype.as_base_dtype(); dtype = dtype.as_base_dtype();
name = scope; name = scope;
var shape_tensor = constant_op._tensor_shape_tensor_conversion_function(shape);
Tensor ones = null;
switch (dtype) switch (dtype)
{ {
case TF_DataType.TF_DOUBLE: case TF_DataType.TF_DOUBLE:
return _constant_if_small(1.0d, dims, dtype, name);
ones = constant(1.0d);
break;
case TF_DataType.TF_FLOAT: case TF_DataType.TF_FLOAT:
return _constant_if_small(1.0f, dims, dtype, name);
ones = constant(1.0f);
break;
default: default:
return _constant_if_small(1, dims, dtype, name);
ones = constant(1);
break;
} }
return fill(shape_tensor, ones, name: name);
}); });


public static Tensor one_hot(Tensor indices, int depth, public static Tensor one_hot(Tensor indices, int depth,
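For context, a minimal usage sketch of the two members touched above, assuming eager execution is active; the shapes and values are arbitrary examples, constant_op.constant is used as it is elsewhere in this commit, and the TensorShape constructor call is an assumption:

    // fill: build the shape as a 1-D tensor, then fill it with a scalar value.
    var dims = constant_op.constant(new[] { 2, 3 });
    var twos = array_ops.fill(dims, constant_op.constant(2), name: "twos");   // 2x3 tensor of 2s

    // ones: now takes a TensorShape, converts it to a shape tensor internally,
    // and delegates to fill() instead of the old _constant_if_small path.
    var one_mat = array_ops.ones(new TensorShape(2, 3), TF_DataType.TF_FLOAT);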


+ 2
- 2
src/TensorFlowNET.Core/Operations/embedding_ops.cs View File

@@ -61,7 +61,7 @@ namespace Tensorflow
/// <param name="name"></param> /// <param name="name"></param>
/// <param name="max_norm"></param> /// <param name="max_norm"></param>
/// <returns></returns> /// <returns></returns>
public static Tensor _embedding_lookup_and_transform(VariableV1 @params,
public static Tensor _embedding_lookup_and_transform(IVariableV1 @params,
Tensor ids, Tensor ids,
string partition_strategy = "mod", string partition_strategy = "mod",
string name = null, string name = null,
@@ -131,7 +131,7 @@ namespace Tensorflow
max_norm: max_norm); max_norm: max_norm);
} }


public static Tensor embedding_lookup(VariableV1 @params, Tensor ids,
public static Tensor embedding_lookup(IVariableV1 @params, Tensor ids,
string partition_strategy = "mod", string partition_strategy = "mod",
string name = null, string name = null,
bool validate_indices = true, bool validate_indices = true,


+ 123
- 30
src/TensorFlowNET.Core/Operations/gen_array_ops.cs View File

@@ -54,17 +54,27 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
try
{
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ConcatV2", name, null,
values, axis);
return _result;
}
catch (Exception)
{
return concat_v2_eager_fallback(values, axis, name, tf.context);
}
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ConcatV2", name, new IntPtr[]
{
values as EagerTensor,
axis as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
return _op.output;
}

public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null)
{
if (tf.context.executing_eagerly())
{
return concat_v2_eager_fallback(values, axis, name, tf.context);
} }


var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
@@ -79,7 +89,7 @@ namespace Tensorflow
var _inputs_flat = input.concat(axis1); var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx };


return _execute.execute(ctx, "ConcatV2", _inputs_flat, _attrs, name: name);
return _execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
} }


public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null)
@@ -152,8 +162,14 @@ namespace Tensorflow
{ {
if(tf.context.executing_eagerly()) if(tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, "Pack", name, null, values, "axis", axis);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Pack", name,
values.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), values.Length,
op => wrap_tfe_src.SetOpAttrs(op, "axis", axis),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis }); var _op = _op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis });
@@ -170,8 +186,6 @@ namespace Tensorflow
_attrs["dtype"] = _op.get_attr("dtype"); _attrs["dtype"] = _op.get_attr("dtype");
_attrs["shape"] = _op.get_attr("shape"); _attrs["shape"] = _op.get_attr("shape");


_execute.record_gradient("Placeholder", _inputs_flat, _attrs, _result, name);

return new Tensor(_op, 0, dtype); return new Tensor(_op, 0, dtype);
} }


@@ -214,6 +228,19 @@ namespace Tensorflow
/// <param name="name"></param> /// <param name="name"></param>
public static Tensor identity(Tensor input, string name = null) public static Tensor identity(Tensor input, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Identity", name, new IntPtr[]
{
input as EagerTensor
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Identity", name, new { input }); var _op = _op_def_lib._apply_op_helper("Identity", name, new { input });


return _op.output; return _op.output;
@@ -251,10 +278,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Fill", name, null,
dims, value);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Fill", name, new IntPtr[]
{
dims as EagerTensor,
value as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Fill", name, new { dims, value }); var _op = _op_def_lib._apply_op_helper("Fill", name, new { dims, value });
@@ -270,6 +303,20 @@ namespace Tensorflow
/// <returns>A tuple of `Tensor` objects (r0, r1).</returns> /// <returns>A tuple of `Tensor` objects (r0, r1).</returns>
public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "")
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor(), new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"BroadcastGradientArgs", name, new IntPtr[]
{
s0 as EagerTensor,
s1 as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return (results[0].Resolve(), results[1].Resolve());
}

var _op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 }); var _op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 });


return (_op.outputs[0], _op.outputs[1]); return (_op.outputs[0], _op.outputs[1]);
@@ -283,6 +330,20 @@ namespace Tensorflow


public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null) public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Reshape", name, new IntPtr[]
{
tensor as EagerTensor,
shape as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape }); var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
return _op.output; return _op.output;
} }
@@ -360,10 +421,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Shape", name, null,
input, "out_type", out_type);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Shape", name, new IntPtr[]
{
input as EagerTensor,
}, 1,
op => wrap_tfe_src.SetOpAttrs(op, "out_type", out_type),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Shape", name, new { input, out_type }); var _op = _op_def_lib._apply_op_helper("Shape", name, new { input, out_type });
@@ -411,6 +478,20 @@ namespace Tensorflow


public static Tensor tile<T>(Tensor input, T multiples, string name = null) public static Tensor tile<T>(Tensor input, T multiples, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Tile", name, new IntPtr[]
{
input as EagerTensor,
multiples as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples }); var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
return _op.outputs[0]; return _op.outputs[0];
} }
@@ -444,12 +525,24 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"StridedSlice", name, null,
input, begin, end, strides, "begin_mask", begin_mask,
"end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
"new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"StridedSlice", name, new IntPtr[]
{
input as EagerTensor,
begin as EagerTensor,
end as EagerTensor,
strides as EagerTensor,
}, 4,
op => wrap_tfe_src.SetOpAttrs(op,
"begin_mask", begin_mask,
"end_mask", end_mask,
"ellipsis_mask", ellipsis_mask,
"new_axis_mask", new_axis_mask,
"shrink_axis_mask", shrink_axis_mask),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("StridedSlice", name, new var _op = _op_def_lib._apply_op_helper("StridedSlice", name, new


+ 375
- 93
src/TensorFlowNET.Core/Operations/gen_math_ops.cs View File

@@ -16,12 +16,13 @@


using System; using System;
using System.Linq; using System.Linq;
using System.Runtime.InteropServices;
using Tensorflow.Eager; using Tensorflow.Eager;
using static Tensorflow.Binding; using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
{ {
public static class gen_math_ops
public static partial class gen_math_ops
{ {
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); public static OpDefLibrary _op_def_lib = new OpDefLibrary();
public static Execute _execute = new Execute(); public static Execute _execute = new Execute();
@@ -41,11 +42,35 @@ namespace Tensorflow
/// <returns></returns> /// <returns></returns>
public static Tensor add_n(Tensor[] inputs, string name = null) public static Tensor add_n(Tensor[] inputs, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddN", name,
inputs.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), inputs.Length,
null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs }); var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs });


return _op.outputs[0]; return _op.outputs[0];
} }


public static IntPtr add_n(IntPtr[] inputs, string name = null)
{
var results = new[] { c_api.TFE_NewEagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddN", name,
inputs, inputs.Length,
null,
results, results.Length));
status.Check(true);
return results[0];
}

/// <summary> /// <summary>
/// Returns the index with the largest value across dimensions of a tensor. /// Returns the index with the largest value across dimensions of a tensor.
/// </summary> /// </summary>
@@ -119,17 +144,18 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
try
{
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mean", name, null,
input, axis, "keep_dims", keep_dims);
return _result;
}
catch (Exception)
{
return mean_eager_fallback(input as Tensor[], axis as Tensor, keep_dims: keep_dims, name: name, ctx: tf.context);
}
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mean", name,
new IntPtr[]
{
input as EagerTensor,
axis as EagerTensor
}, 2,
op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
@@ -137,6 +163,18 @@ namespace Tensorflow
return _op.output; return _op.output;
} }


public static Tensor mean(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null)
{
if (tf.context.executing_eagerly())
{
return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.context);
}

var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims });

return _op.output;
}

private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{ {
var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs }); var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs });
@@ -144,7 +182,7 @@ namespace Tensorflow
var _inputs_flat = input.concat(axis1); var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };


return _execute.execute(ctx, "Mean", _inputs_flat, _attrs, name: name);
return _execute.execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0];
} }


public static Tensor prod<T1, T2>(T1 input, T2 axis, bool keep_dims = false, string name = null) public static Tensor prod<T1, T2>(T1 input, T2 axis, bool keep_dims = false, string name = null)
@@ -153,10 +191,17 @@ namespace Tensorflow
{ {
try try
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Prod", name, null,
input, axis, "keep_dims", keep_dims);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Prod", name, new IntPtr[]
{
input as EagerTensor,
axis as EagerTensor
}, 2,
op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }
catch (Exception) catch (Exception)
{ {
@@ -175,7 +220,7 @@ namespace Tensorflow
var _inputs_flat = input.concat(axis1); var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };


return _execute.execute(ctx, "Prod", _inputs_flat, _attrs, name: name);
return _execute.execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0];
} }


public static Tensor acos(Tensor x, string name = null) public static Tensor acos(Tensor x, string name = null)
@@ -192,14 +237,41 @@ namespace Tensorflow
return _op.outputs[0]; return _op.outputs[0];
} }


public static Tensor add(Tensor x, Tensor y, string name = null)
{
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Add", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });

return _op.output;
}

public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null)
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Add", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Add", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
@@ -212,10 +284,16 @@ namespace Tensorflow
// forward_compatible(2019, 6, 25): // forward_compatible(2019, 6, 25):
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddV2", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddV2", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
@@ -241,10 +319,15 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sin", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sin", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x });
@@ -270,10 +353,15 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sigmoid", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sigmoid", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x }); var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
@@ -358,10 +446,15 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Tan", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Tan", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x });
@@ -434,6 +527,20 @@ namespace Tensorflow


public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Less", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });


return _op.outputs[0]; return _op.outputs[0];
@@ -497,6 +604,19 @@ namespace Tensorflow
/// <returns> A `Tensor`. Has the same type as `x`.</returns> /// <returns> A `Tensor`. Has the same type as `x`.</returns>
public static Tensor square(Tensor x, string name = null) public static Tensor square(Tensor x, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Square", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x });


return _op.outputs[0]; return _op.outputs[0];
@@ -552,10 +672,14 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Cast", name, null,
x, "DstT", DstT, "Truncate", Truncate);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Cast", name,
new IntPtr[] { x as EagerTensor }, 1,
op => wrap_tfe_src.SetOpAttrs(op, "DstT", DstT, "Truncate", Truncate),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
@@ -567,10 +691,15 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Neg", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Neg", name, new IntPtr[]
{
x as EagerTensor
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x });
@@ -582,10 +711,15 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sqrt", name, null,
x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sqrt", name, new IntPtr[]
{
x as EagerTensor,
}, 1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x }); var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x });
@@ -593,14 +727,41 @@ namespace Tensorflow
return _op.outputs[0]; return _op.outputs[0];
} }


public static Tensor sub(Tensor x, Tensor y, string name = null)
{
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sub", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });

return _op.output;
}

public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null)
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sub", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sub", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
@@ -619,10 +780,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Equal", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Equal", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
@@ -642,10 +809,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"NotEqual", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"NotEqual", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
@@ -657,24 +830,57 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Atan2", name, null,
y, x);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Atan2", name, new IntPtr[]
{
y as EagerTensor,
x as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x }); var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x });
return _op.output; return _op.output;
} }


public static Tensor mul(Tensor x, Tensor y, string name = null)
{
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mul", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });

return _op.output;
}

public static Tensor mul<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor mul<Tx, Ty>(Tx x, Ty y, string name = null)
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mul", name, null,
x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mul", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor,
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
@@ -693,8 +899,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, "", "RealDiv", name, null, x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"RealDiv", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
@@ -711,6 +925,20 @@ namespace Tensorflow


public static Tensor floor_mod(Tensor x, Tensor y, string name = null) public static Tensor floor_mod(Tensor x, Tensor y, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"FloorMod", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y });


return _op.outputs[0]; return _op.outputs[0];
@@ -720,8 +948,16 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, "", "FloorDiv", name, null, x, y);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"FloorDiv", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y });
@@ -742,10 +978,20 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"MatMul", name, null,
a, b, "transpose_a", transpose_a, "transpose_b", transpose_b);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"MatMul", name,
new IntPtr[]
{
a as EagerTensor,
b as EagerTensor
}, 2,
op => wrap_tfe_src.SetOpAttrs(op,
"transpose_a", transpose_a,
"transpose_b", transpose_b),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
@@ -839,6 +1085,20 @@ namespace Tensorflow


public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null)
{ {
if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Pow", name, new IntPtr[]
{
x as EagerTensor,
y as EagerTensor
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y });


return _op.outputs[0]; return _op.outputs[0];
@@ -848,18 +1108,18 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
try
{
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sum", name, null,
input, axis, "keep_dims", keep_dims);
return _result;
}
catch (Exception)
{
return _sum_eager_fallback(input as Tensor[], axis as Tensor,
keep_dims: keep_dims, name: name, ctx: tf.context);
}
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sum", name,
new IntPtr[]
{
input as EagerTensor,
axis as EagerTensor
}, 2,
op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
@@ -867,6 +1127,19 @@ namespace Tensorflow
return _op.outputs[0]; return _op.outputs[0];
} }


public static Tensor _sum(Tensor[] inputs, Tensor axis = default, bool keep_dims = false, string name = null)
{
if (tf.context.executing_eagerly())
{
return _sum_eager_fallback(inputs, axis,
keep_dims: keep_dims, name: name, ctx: tf.context);
}

var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims });

return _op.outputs[0];
}

private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{ {
var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs }); var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs });
@@ -874,7 +1147,7 @@ namespace Tensorflow
var _inputs_flat = input.concat(axis1); var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };


return _execute.execute(ctx, "Sum", _inputs_flat, _attrs, name: name);
return _execute.execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
} }


/// <summary> /// <summary>
@@ -889,8 +1162,17 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name, "Range", name, null, start, limit, delta);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Range", name, new IntPtr[]
{
start as EagerTensor,
limit as EagerTensor,
delta as EagerTensor
}, 3, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });


+ 26
- 0
src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs View File

@@ -0,0 +1,26 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Eager;
using static Tensorflow.Binding;

namespace Tensorflow
{
public static partial class gen_math_ops
{
public static EagerTensor mul(IntPtr x, IntPtr y, string name = null)
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mul", name, new IntPtr[]
{
x,
y,
}, 2, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}
}
}

+ 21
- 0
src/TensorFlowNET.Core/Operations/gen_random_ops.cs View File

@@ -13,6 +13,10 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
******************************************************************************/ ******************************************************************************/
using System;
using System.Linq;
using Tensorflow.Eager;
using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
{ {
@@ -36,6 +40,23 @@ namespace Tensorflow
if (!seed2.HasValue) if (!seed2.HasValue)
seed2 = 0; seed2 = 0;


if (tf.context.executing_eagerly())
{
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"RandomStandardNormal", name, new IntPtr[]
{
shape as EagerTensor,
}, 1,
op => wrap_tfe_src.SetOpAttrs(op,
"seed", seed,
"seed2", seed2,
"dtype", dtype),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("RandomStandardNormal", var _op = _op_def_lib._apply_op_helper("RandomStandardNormal",
name: name, name: name,
args: new { shape, dtype, seed, seed2 }); args: new { shape, dtype, seed, seed2 });


+ 86
- 17
src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs View File

@@ -14,6 +14,8 @@
limitations under the License. limitations under the License.
******************************************************************************/ ******************************************************************************/


using System;
using System.Linq;
using Tensorflow.Eager; using Tensorflow.Eager;
using static Tensorflow.Binding; using static Tensorflow.Binding;


@@ -23,14 +25,66 @@ namespace Tensorflow
{ {
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); public static OpDefLibrary _op_def_lib = new OpDefLibrary();


public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null)
{
if (tf.context.executing_eagerly())
{
// AssignSubVariableOp, like AssignAddVariableOp below, produces no outputs,
// so no result handles are allocated.
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AssignSubVariableOp", name,
new IntPtr[]
{
resource as EagerTensor,
value as EagerTensor
}, 2, null,
null, 0));
status.Check(true);
return null;
}

return null;
}

/// <summary>
/// Adds a value to the current value of a variable.
/// </summary>
/// <param name="resource"></param>
/// <param name="value"></param>
/// <param name="name"></param>
/// <returns></returns>
public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null)
{
if (tf.context.executing_eagerly())
{
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AssignAddVariableOp", name,
new IntPtr[]
{
resource as EagerTensor,
value as EagerTensor
}, 2, null,
null, 0));
status.Check(true);
return null;
}

return null;
}

public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null) public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null)
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AssignVariableOp", name, null,
resource, value);
return _result;
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AssignVariableOp", name,
new IntPtr[]
{
resource as EagerTensor,
value as EagerTensor
}, 2, null,
null, 0));
status.Check(true);
return null;
} }


var _op = _op_def_lib._apply_op_helper("AssignVariableOp", name, new { resource, value }); var _op = _op_def_lib._apply_op_helper("AssignVariableOp", name, new { resource, value });
@@ -42,10 +96,14 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"VarIsInitializedOp", name, null,
resource);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"VarIsInitializedOp", name,
new IntPtr[] { resource as EagerTensor },
1, null,
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("VarIsInitializedOp", name, new { resource }); var _op = _op_def_lib._apply_op_helper("VarIsInitializedOp", name, new { resource });
@@ -65,12 +123,19 @@ namespace Tensorflow
public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape,
string container ="", string shared_name = "", string name = null) string container ="", string shared_name = "", string name = null)
{ {
if (tf.context.executing_eagerly())
if(tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"VarHandleOp", name, null,
"container", container, "shared_name", shared_name, "dtype", dtype, "shape", shape.dims);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"VarHandleOp", name, null, 0,
op => wrap_tfe_src.SetOpAttrs(op,
"container", container,
"shared_name", shared_name,
"dtype", dtype,
"shape", shape.dims),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("VarHandleOp", name, new { var _op = _op_def_lib._apply_op_helper("VarHandleOp", name, new {
@@ -94,10 +159,14 @@ namespace Tensorflow
{ {
if (tf.context.executing_eagerly()) if (tf.context.executing_eagerly())
{ {
var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ReadVariableOp", name, null,
resource, "dtype", dtype);
return _result;
var results = new[] { new EagerTensor() };
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ReadVariableOp", name,
new IntPtr[] { resource as EagerTensor }, 1,
op => wrap_tfe_src.SetOpAttrs(op, "dtype", dtype),
results.Select(x => x.EagerTensorHandle).ToArray(), results.Length));
status.Check(true);
return results[0].Resolve();
} }


var _op = _op_def_lib._apply_op_helper("ReadVariableOp", name, new var _op = _op_def_lib._apply_op_helper("ReadVariableOp", name, new


+ 31
- 1
src/TensorFlowNET.Core/Operations/math_ops.cs View File

@@ -17,6 +17,7 @@
using NumSharp; using NumSharp;
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using Tensorflow.Eager;
using Tensorflow.Framework; using Tensorflow.Framework;
using static Tensorflow.Binding; using static Tensorflow.Binding;


@@ -84,6 +85,23 @@ namespace Tensorflow
}); });
} }


public static ResourceVariable cast(ResourceVariable x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null)
{
var base_type = dtype.as_base_dtype();
if (base_type == x.dtype)
return x;

return tf_with(ops.name_scope(name, "Cast", new { x }), scope =>
{
name = scope;
var t_x = ops.convert_to_tensor(x, name: "x");
if (t_x.dtype.as_base_dtype() != base_type)
t_x = gen_math_ops.cast(t_x, base_type, name: name);

return x;
});
}

public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null)
{ {
var base_type = dtype.as_base_dtype(); var base_type = dtype.as_base_dtype();
@@ -540,6 +558,11 @@ namespace Tensorflow
} }
else else
{ {
if(x is EagerTensor)
{
return constant_op.constant(np.arange(x.shape.Rank));
}

var rank = array_ops.rank(x); var rank = array_ops.rank(x);
return range(0, rank, 1); return range(0, rank, 1);
} }
@@ -588,7 +611,14 @@ namespace Tensorflow
=> gen_math_ops.rsqrt(x, name: name); => gen_math_ops.rsqrt(x, name: name);


public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null)
=> gen_math_ops.pow(x, y, name: name);
=> tf_with(ops.name_scope(name, "Pow", new { x, y }), scope =>
{
name = scope;
var x_tensor = ops.convert_to_tensor(x, name: "x");
var y_tensor = ops.convert_to_tensor(y, name: "y", dtype: x_tensor.dtype.as_base_dtype());

return gen_math_ops.pow(x_tensor, y_tensor, name: name);
});


public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range") public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range")
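The pow change above aligns the C# helper with the Python math_ops.pow: both operands are converted to tensors under a "Pow" name scope, with the second operand coerced to the first operand's base dtype, before the kernel runs. A tiny worked example (illustrative only):

    var x = constant_op.constant(new[] { 2.0f, 3.0f });
    var y = math_ops.pow(x, 3);   // the int exponent is converted to float32; y evaluates to [8, 27]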
{ {


+ 2
- 2
src/TensorFlowNET.Core/Operations/nn_impl.py.cs View File

@@ -98,8 +98,8 @@ namespace Tensorflow
/// <param name="name"></param> /// <param name="name"></param>
/// <returns></returns> /// <returns></returns>
public static Tensor[] fused_batch_norm(Tensor x, public static Tensor[] fused_batch_norm(Tensor x,
VariableV1 scale,
VariableV1 offset,
IVariableV1 scale,
IVariableV1 offset,
Tensor mean, Tensor mean,
Tensor variance, Tensor variance,
float epsilon = 0.001f, float epsilon = 0.001f,


+ 1
- 0
src/TensorFlowNET.Core/Operations/random_ops.cs View File

@@ -47,6 +47,7 @@ namespace Tensorflow
var rnd = gen_random_ops.random_standard_normal(shape_tensor, dtype: dtype, seed: seed1, seed2: seed2); var rnd = gen_random_ops.random_standard_normal(shape_tensor, dtype: dtype, seed: seed1, seed2: seed2);
var mul = rnd * stddev_tensor; var mul = rnd * stddev_tensor;
var value = math_ops.add(mul, mean_tensor, name: name); var value = math_ops.add(mul, mean_tensor, name: name);
// tensor_util.maybe_set_static_shape(value, shape)
return value; return value;
}); });
} }


+ 26
- 20
src/TensorFlowNET.Core/Operations/resource_variable_ops.cs View File

@@ -15,6 +15,7 @@
******************************************************************************/ ******************************************************************************/


using System; using System;
using System.Linq;
using Tensorflow.Framework; using Tensorflow.Framework;
using static Tensorflow.CppShapeInferenceResult.Types; using static Tensorflow.CppShapeInferenceResult.Types;


@@ -70,7 +71,7 @@ namespace Tensorflow
throw new NotImplementedException(); throw new NotImplementedException();
} }


public static bool is_resource_variable(VariableV1 var)
public static bool is_resource_variable(IVariableV1 var)
{ {
return var is ResourceVariable; return var is ResourceVariable;
} }
@@ -128,14 +129,34 @@ namespace Tensorflow
// When in eager mode, explicitly ensure so here. When in graph mode, it's // When in eager mode, explicitly ensure so here. When in graph mode, it's
// ensured by always generating different variable names. // ensured by always generating different variable names.
var exists = gen_resource_variable_ops.var_is_initialized_op(handle); var exists = gen_resource_variable_ops.var_is_initialized_op(handle);
}


return handle;
// We create an assert Op instead of checking right away in order to be
// compatible with ASYNC execution mode. Further, since not all devices
// support string tensors, we encode the assertion string in the Op name
/*gen_logging_ops._assert(
math_ops.logical_not(exists), [exists], name = "EagerVariableNameReuse");*/
var handle_data = new HandleData();
handle_data.IsSet = true;
handle_data.ShapeAndType.Add(new HandleShapeAndType
{
Dtype = dtype.as_datatype_enum(),
Shape = shape.as_proto()
});
_set_handle_shapes_and_types(handle, handle_data, graph_mode);
return handle;
}
} }


private static void _set_handle_shapes_and_types(Tensor handle, HandleData full_handle_data, bool graph_mode)
/// <summary>
/// Sets the shape inference result HandleData on tensor.
/// </summary>
/// <param name="handle"></param>
/// <param name="full_handle_data"></param>
/// <param name="graph_mode"></param>
private static void _set_handle_shapes_and_types(Tensor handle, HandleData handle_data, bool graph_mode)
{ {

if (!graph_mode)
return;
} }


/// <summary> /// <summary>
@@ -171,20 +192,5 @@ namespace Tensorflow
return HandleData.Parser.ParseFrom(handle.BufferToArray()); return HandleData.Parser.ParseFrom(handle.BufferToArray());
} }
} }

/// <summary>
/// Represents a future for a read of a variable.
/// Pretends to be the tensor if anyone looks.
/// </summary>
public class _UnreadVariable : BaseResourceVariable
{
}

/// <summary>
/// A python variable from an existing handle.
/// </summary>
public class BaseResourceVariable : VariableV1
{
}
} }
} }

+ 1
- 1
src/TensorFlowNET.Core/Protobuf/IProtoBuf.cs View File

@@ -6,7 +6,7 @@
/// </summary> /// </summary>
public interface IProtoBuf<TProtoDef, TDef> public interface IProtoBuf<TProtoDef, TDef>
{ {
string name { get; }
string Name { get; }


/// <summary> /// <summary>
/// Converts a `Variable` to a `VariableDef` protocol buffer. /// Converts a `Variable` to a `VariableDef` protocol buffer.


+ 2
- 1
src/TensorFlowNET.Core/Sessions/BaseSession.cs View File

@@ -65,7 +65,8 @@ namespace Tensorflow


public virtual NDArray run(ITensorOrOperation fetche, params FeedItem[] feed_dict) public virtual NDArray run(ITensorOrOperation fetche, params FeedItem[] feed_dict)
{ {
return _run(fetche, feed_dict)[0];
var results = _run(fetche, feed_dict);
return fetche is Tensor ? results[0] : null;
} }


public virtual (NDArray, NDArray, NDArray, NDArray, NDArray) run( public virtual (NDArray, NDArray, NDArray, NDArray, NDArray) run(
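The run change above means fetching an Operation (which produces no value) now yields null instead of indexing into an empty result set. A short sketch, assuming the usual tf.Session() pattern; some_tensor and init_op are placeholders for whatever graph pieces the caller has built:

    // Tensor some_tensor = ...; Operation init_op = ...;
    using (var sess = tf.Session())
    {
        NDArray value = sess.run(some_tensor);   // Tensor fetch: NDArray result, as before
        NDArray none = sess.run(init_op);        // Operation fetch: returns null after this change
    }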


+ 5
- 0
src/TensorFlowNET.Core/Status/Status.cs View File

@@ -54,6 +54,11 @@ namespace Tensorflow
Handle = TF_NewStatus(); Handle = TF_NewStatus();
} }


public Status(SafeStatusHandle handle)
{
Handle = handle ?? throw new ArgumentNullException(nameof(handle));
}

public void SetStatus(TF_Code code, string msg) public void SetStatus(TF_Code code, string msg)
{ {
TF_SetStatus(Handle, code, msg); TF_SetStatus(Handle, code, msg);
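This constructor is what lets every eager call site in this commit write using Status status = new Status(c_api.TFE_FastPathExecute(...)): the Status adopts the SafeStatusHandle returned by the native layer, and Check(true) surfaces any error it carries. A minimal sketch, where NativeCall stands in for any c_api entry point that returns a SafeStatusHandle:

    using Status status = new Status(NativeCall());
    status.Check(true);   // throws if the adopted status is non-OK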


+ 17
- 0
src/TensorFlowNET.Core/System/GCItemCounter.cs View File

@@ -0,0 +1,17 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow
{
public class GCItemCounter
{
public GCItemType ItemType { get; set; }
public int RefCounter { get; set; }
public DateTime LastUpdateTime { get; set; }
public IntPtr Handle { get; set; }

public override string ToString()
=> $"{ItemType} {RefCounter} {LastUpdateTime}";
}
}

+ 13
- 0
src/TensorFlowNET.Core/System/GCItemType.cs View File

@@ -0,0 +1,13 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow
{
public enum GCItemType
{
TensorHandle = 0,
LocalTensorHandle = 1,
EagerTensorHandle = 2
}
}

+ 95
- 0
src/TensorFlowNET.Core/System/GarbageCollector.cs View File

@@ -0,0 +1,95 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Timers;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class GarbageCollector
{
static Dictionary<IntPtr, GCItemCounter> container = new Dictionary<IntPtr, GCItemCounter>();

static object locker = new object();
public static void Init()
{
Task.Run(() =>
{
while (true)
{
Thread.Sleep(100);
Recycle();
}
});

}

public static void Increase(IntPtr handle, GCItemType type)
{
if (handle == IntPtr.Zero)
return;

if (container.ContainsKey(handle))
{
container[handle].RefCounter++;
container[handle].LastUpdateTime = DateTime.Now;
}
else
{
lock (locker)
{
container[handle] = new GCItemCounter
{
ItemType = type,
RefCounter = 1,
Handle = handle,
LastUpdateTime = DateTime.Now
};
}
}
}

public static void Decrease(IntPtr handle)
{
if (handle != IntPtr.Zero && container.ContainsKey(handle))
container[handle].RefCounter--;
}

private static void Recycle()
{
// recycle handles whose ref count has been zero for more than 100 ms
lock (locker)
{
var items = container.Values
.Where(x => x.RefCounter <= 0 && (DateTime.Now - x.LastUpdateTime).TotalMilliseconds > 100)
.ToArray();

foreach (var item in items)
{
item.RefCounter = 0;
container.Remove(item.Handle);
switch (item.ItemType)
{
case GCItemType.TensorHandle:
// print($"c_api.TF_DeleteTensor({item.Handle.ToString("x16")})");
c_api.TF_DeleteTensor(item.Handle);
break;
case GCItemType.LocalTensorHandle:
// print($"c_api.TFE_DeleteTensorHandle({item.Handle.ToString("x16")})");
c_api.TFE_DeleteTensorHandle(item.Handle);
break;
case GCItemType.EagerTensorHandle:
// print($"c_api.TFE_DeleteEagerTensor({item.Handle.ToString("x16")})");
c_api.TFE_DeleteEagerTensor(item.Handle);
break;
default:
break;
}
}
}
}
}
}
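For orientation, a hedged sketch of how this reference-counting collector appears intended to be driven; the handle below is a placeholder for a pointer obtained from the native API (e.g. a TF_Tensor*).

GarbageCollector.Init();                     // start the background recycle loop once per process
IntPtr handle = IntPtr.Zero;                 // placeholder: a real caller passes a native handle
GarbageCollector.Increase(handle, GCItemType.TensorHandle);   // with a real pointer: register it / bump its count
// ... use the native object ...
GarbageCollector.Decrease(handle);           // with a real pointer: freed roughly 100 ms after the count reaches zero

Note that Recycle() only deletes entries whose counter is at or below zero and that have been idle for more than 100 ms, so briefly re-acquiring the same handle is tolerated.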

+ 10
- 11
src/TensorFlowNET.Core/TensorFlow.Binding.csproj View File

@@ -4,8 +4,8 @@
<TargetFramework>netstandard2.0</TargetFramework>
<AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>2.01.0</TargetTensorFlow>
<Version>0.20.0</Version>
<TargetTensorFlow>2.2.0</TargetTensorFlow>
<Version>0.20.0-alpha2</Version>
<LangVersion>8.0</LangVersion>
<Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors>
<Company>SciSharp STACK</Company>
@@ -15,17 +15,15 @@
<RepositoryType>git</RepositoryType>
<PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl>
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
<PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags>
<PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#, TF.NET</PackageTags>
<Description>Google's TensorFlow full binding in .NET Standard.
Building, training and infering deep learning models.
https://tensorflownet.readthedocs.io</Description>
<AssemblyVersion>0.20.0.0</AssemblyVersion>
<PackageReleaseNotes>Changes since v0.15.0:
1: Add TransformGraphWithStringInputs.
2: tf.trainer.load_graph, tf.trainer.freeze_graph
3: Import Protobuf.Text
4: Support YOLOv3 object detection
5: Add implicitation for Operation to RefVariable</PackageReleaseNotes>
<PackageReleaseNotes>tf.net 0.20.x and above are based on TensorFlow native 2.x.
Eager mode has finally been added.
It is not yet stable and is missing many APIs; tf.net 0.15.x is more stable for production.
Please be patient: we're working hard on the missing functions, and providing a full TensorFlow binding is our mission.</PackageReleaseNotes>
<FileVersion>0.20.0.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile>
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
@@ -37,12 +35,12 @@ https://tensorflownet.readthedocs.io</Description>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<DefineConstants>TRACE;DEBUG</DefineConstants>
<PlatformTarget>x64</PlatformTarget>
<PlatformTarget>AnyCPU</PlatformTarget>
</PropertyGroup>


<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<DefineConstants>TRACE;DEBUG;SERIALIZABLE_</DefineConstants>
<DefineConstants>TRACE;DEBUG</DefineConstants>
<PlatformTarget>x64</PlatformTarget>
</PropertyGroup>


@@ -64,6 +62,7 @@ https://tensorflownet.readthedocs.io</Description>
<None Remove="Distribute\**" />
<None Remove="Models\**" />
<None Remove="runtimes\**" />
<Compile Remove="Util\BindingArray2.cs" />
<None Include="..\..\LICENSE">
<Pack>True</Pack>
<PackagePath></PackagePath>


+ 79
- 0
src/TensorFlowNET.Core/Tensors/EagerTensorV2.cs View File

@@ -0,0 +1,79 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Runtime.InteropServices;
using System.Text;
using Tensorflow.Eager;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class EagerTensorV2 : DisposableObject, ITensor
{
IntPtr tfe_tensor_handle;
public IntPtr EagerTensorHandle { get; set; }
public string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(tfe_tensor_handle, status.Handle));

static Status status = new Status();

public EagerTensorV2(IntPtr handle)
{
EagerTensorHandle = handle;
tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);
_handle = c_api.TFE_TensorHandleResolve(tfe_tensor_handle, status.Handle);
}

public unsafe EagerTensorV2(NDArray nd, string device_name = "")
{
if (nd.typecode == NPTypeCode.String)
throw new NotImplementedException("Support for NDArray of type string not implemented yet");

var arraySlice = nd.Unsafe.Storage.Shape.IsContiguous ? nd.GetData() : nd.CloneData();

_handle = c_api.TF_NewTensor(nd.dtype.as_dtype(),
nd.shape.Select(i => (long)i).ToArray(),
nd.ndim,
new IntPtr(arraySlice.Address),
nd.size * nd.dtypesize,
deallocator: (IntPtr dataPtr, long len, IntPtr args) =>
{

}, IntPtr.Zero);

tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status.Handle);
EagerTensorHandle = c_api.TFE_NewEagerTensor();
}

/*public unsafe EagerTensorV2(float[,] value)
{
var dims = new long[] { value.Rank, value.Length / value.Rank };
fixed (float* pointer = &value[0, 0])
{
// The address stored in pointerToFirst
// is valid only inside this fixed statement block.
tensorHandle = c_api.TF_NewTensor(TF_DataType.TF_FLOAT,
dims,
value.Rank,
new IntPtr(pointer),
value.Length * sizeof(float),
deallocator: (IntPtr dataPtr, long len, IntPtr args) =>
{

}, IntPtr.Zero);


localTensorHandle = c_api.TFE_NewTensorHandle(tensorHandle, status);
_handle = c_api.TFE_EagerTensorFromHandle(tf.context, localTensorHandle);
}
}*/

protected override void DisposeUnmanagedResources(IntPtr handle)
{
c_api.TF_DeleteTensor(_handle);
c_api.TFE_DeleteTensorHandle(tfe_tensor_handle);
c_api.TFE_DeleteEagerTensor(EagerTensorHandle);
}
}
}
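A minimal, hedged usage sketch for the new class, assuming a NumSharp array as input and an already-initialized eager runtime; whether Device resolves correctly here depends on the handle wiring above.

var nd = np.array(new float[,] { { 1f, 2f, 3f }, { 4f, 5f, 6f } });
using (var t = new EagerTensorV2(nd))
{
    Console.WriteLine(t.Device);   // device name resolved via TFE_TensorHandleDeviceName
}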

+ 11
- 0
src/TensorFlowNET.Core/Tensors/ITensor.cs View File

@@ -0,0 +1,11 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow
{
public interface ITensor
{

}
}

+ 31
- 0
src/TensorFlowNET.Core/Tensors/TF_BindingArray.cs View File

@@ -0,0 +1,31 @@
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Text;

namespace Tensorflow
{
[StructLayout(LayoutKind.Sequential)]
public struct TF_BindingArray
{
public IntPtr array;
public int length;

public static implicit operator TF_BindingArray(IntPtr handle)
=> handle == IntPtr.Zero ? default : Marshal.PtrToStructure<TF_BindingArray>(handle);

public unsafe IntPtr this[int index]
=> array == IntPtr.Zero ? IntPtr.Zero : *((IntPtr*)array + index);

public unsafe IntPtr[] Data
{
get
{
var results = new IntPtr[length];
for (int i = 0; i < length; i++)
results[i] = array == IntPtr.Zero ? IntPtr.Zero : *((IntPtr*)array + i);
return results;
}
}
}
}
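To make the layout concrete, a hedged, TF-free sketch that builds the same { array, length } header in unmanaged memory and reads it back through the struct (System and System.Runtime.InteropServices usings assumed).

// Two pointer-sized slots that the header's 'array' field will point at.
IntPtr slots = Marshal.AllocHGlobal(IntPtr.Size * 2);
Marshal.WriteIntPtr(slots, 0, new IntPtr(0x1111));
Marshal.WriteIntPtr(slots, IntPtr.Size, new IntPtr(0x2222));

// Native header { array, length }, then the implicit PtrToStructure conversion.
IntPtr header = Marshal.AllocHGlobal(Marshal.SizeOf<TF_BindingArray>());
Marshal.StructureToPtr(new TF_BindingArray { array = slots, length = 2 }, header, false);

TF_BindingArray view = header;
Console.WriteLine(view[0] == new IntPtr(0x1111));   // True
Console.WriteLine(view.Data.Length);                // 2

Marshal.FreeHGlobal(header);
Marshal.FreeHGlobal(slots);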

+ 1
- 1
src/TensorFlowNET.Core/Tensors/TF_Tensor.cs View File

@@ -17,6 +17,6 @@ namespace Tensorflow
=> tensor._handle;


public override string ToString()
=> $"TF_Tensor {_handle}";
=> $"TF_Tensor 0x{_handle.ToString("x16")}";
}
}

+ 9
- 0
src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs View File

@@ -23,6 +23,7 @@ using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
using static Tensorflow.c_api;
using static Tensorflow.Binding;


namespace Tensorflow
{
@@ -59,6 +60,14 @@ namespace Tensorflow
//no need to set AllocationType = AllocationType.None;
}


public Tensor(int value)
{
unsafe
{
_handle = TF_NewTensor(tf.int32, dims: null, num_dims: 0, data: null, len: sizeof(int));
}
}

/// <summary>
/// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller)
/// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs View File

@@ -54,7 +54,7 @@ namespace Tensorflow
#else
#region Compute


public static Tensor operator +(Tensor lhs, ResourceVariable rhs) => BinaryOpWrapper("add", lhs, rhs);
public static Tensor operator +(Tensor lhs, Tensor rhs) => BinaryOpWrapper("add", lhs, rhs);
public static Tensor operator +(Tensor lhs, NDArray rhs) => BinaryOpWrapper("add", lhs, rhs);
public static Tensor operator +(NDArray lhs, Tensor rhs) => BinaryOpWrapper("add", lhs, rhs);


+ 23
- 8
src/TensorFlowNET.Core/Tensors/Tensor.Value.cs View File

@@ -1,4 +1,5 @@
using NumSharp;
using NumSharp.Backends;
using NumSharp.Backends.Unmanaged;
using NumSharp.Utilities;
using System;
@@ -43,7 +44,7 @@ namespace Tensorflow
{
//T can only be unmanaged, I believe it is safe to say that MemoryCopy is valid for all cases this method can be called.
var src = (T*)buffer;
len *= ((long)itemsize);
len *= (long)itemsize;
System.Buffer.MemoryCopy(src, dst, len, len);
}
}
@@ -150,26 +151,40 @@ namespace Tensorflow
/// Tensor has rank 0.
/// </returns>
public NDArray numpy()
=> NDims == 0 ? GetScalar(dtype) : GetNDArray(dtype);
=> GetNDArray(dtype);


protected unsafe NDArray GetNDArray(TF_DataType dtype)
{
UnmanagedStorage storage;
switch (dtype)
{
case TF_DataType.TF_STRING:
return StringData();
return (NDArray)StringData()[0];
case TF_DataType.TF_INT32:
return ToArray<int>();
storage = new UnmanagedStorage(NPTypeCode.Int32);
break;
case TF_DataType.TF_INT64:
storage = new UnmanagedStorage(NPTypeCode.Int64);
break;
case TF_DataType.TF_FLOAT:
return ToArray<float>();
storage = new UnmanagedStorage(NPTypeCode.Float);
break;
case TF_DataType.TF_DOUBLE:
return ToArray<double>();
storage = new UnmanagedStorage(NPTypeCode.Double);
break;
default:
return BufferToArray();
}

storage.Allocate(new Shape(shape));

var bytesize = (long)this.bytesize;
System.Buffer.MemoryCopy(buffer.ToPointer(), storage.Address, bytesize, bytesize);

return new NDArray(storage);
}


protected unsafe NDArray GetScalar(TF_DataType dtype)
/*protected unsafe NDArray GetScalar(TF_DataType dtype)
{
switch(dtype)
{
@@ -184,7 +199,7 @@ namespace Tensorflow
default:
return BufferToArray();
}
}
}*/


/// <summary>
/// Copies the memory of current buffer onto newly allocated array.


+ 5
- 4
src/TensorFlowNET.Core/Tensors/Tensor.cs View File

@@ -32,14 +32,15 @@ namespace Tensorflow
/// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes.
/// </summary>
[SuppressMessage("ReSharper", "ConvertToAutoProperty")]
public partial class Tensor : DisposableObject,
ITensor,
ITensorOrOperation,
_TensorLike,
ITensorOrTensorArray,
IPackable<Tensor>,
ICanBeFlattened
{
private readonly int _id;
protected int _id;
private readonly Operation _op;
private readonly int _value_index;
private TF_Output? _tf_output;
@@ -82,7 +83,7 @@ namespace Tensorflow
/// <summary>
/// The name of the device on which this tensor will be produced, or null.
/// </summary>
public string Device => op.Device;
public virtual string Device => op.Device;
public int[] dims => shape;


/// <summary>
@@ -170,7 +171,7 @@ namespace Tensorflow
/// n n-Tensor (you get the idea)
/// </summary>
/// <remarks>https://www.tensorflow.org/api_docs/python/tf/rank</remarks>
public int rank
public virtual int rank
{
get
{


+ 3
- 0
src/TensorFlowNET.Core/Tensors/c_api.tensor.cs View File

@@ -78,6 +78,9 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, UIntPtr len, Deallocator deallocator, ref DeallocatorArgs deallocator_arg);


[DllImport(TensorFlowLibName)]
public static extern TF_Tensor TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, long len, DeallocatorV2 deallocator, IntPtr args);

/// <summary>
/// Return a new tensor that holds the bytes data[0,len-1]
/// </summary>
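A hedged sketch of the new overload in use, mirroring the call made by EagerTensorV2 above; 'dataPtr' is an assumed pointer to a pinned buffer of six floats, and the no-op deallocator leaves that buffer's lifetime to the caller.

var tensor = c_api.TF_NewTensor(TF_DataType.TF_FLOAT,
    dims: new long[] { 2, 3 }, num_dims: 2,
    data: dataPtr, len: 6 * sizeof(float),
    deallocator: (IntPtr data, long len, IntPtr args) => { /* no-op: caller frees the buffer */ },
    args: IntPtr.Zero);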


+ 26
- 3
src/TensorFlowNET.Core/Tensors/constant_op.cs View File

@@ -101,18 +101,37 @@ namespace Tensorflow
return op.outputs[0];
}


private static Tensor _eager_fill(int[] dims, Tensor value, Context ctx)
private static Tensor _eager_fill(int[] dims, EagerTensor value, Context ctx)
{
var attr_t = value.dtype.as_datatype_enum();
var dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32);
var inputs_flat = new[] { dims_t, value };
var attrs = new object[] { "T", attr_t, "index_type", TF_DataType.TF_INT32 };
var result = _execute.execute(ctx, "Fill", inputs_flat, attrs);
return result;
var result = _execute.execute(ctx, "Fill", 1, inputs_flat, attrs);
return result[0];
}


private static EagerTensor convert_to_eager_tensor(object value, Context ctx, TF_DataType dtype = TF_DataType.DtInvalid)
{
// convert data type
if (dtype != TF_DataType.DtInvalid &&
value.GetType().Name != "NDArray" &&
value.GetType().BaseType.Name != "Array" &&
dtypes.as_base_dtype(dtype) != dtypes.as_dtype(value.GetType()))
{
switch (dtype)
{
case TF_DataType.TF_FLOAT:
value = Convert.ToSingle(value);
break;
case TF_DataType.TF_INT64:
value = Convert.ToInt64(value);
break;
default:
break;
}
}

switch (value)
{
case NDArray val:
@@ -125,8 +144,12 @@ namespace Tensorflow
return new EagerTensor(val, ctx.device_name);
case int[,] val:
return new EagerTensor(val, ctx.device_name);
case long val:
return new EagerTensor(val, ctx.device_name);
case float val:
return new EagerTensor(val, ctx.device_name);
case float[,] val:
return new EagerTensor(val, ctx.device_name);
case double val:
return new EagerTensor(val, ctx.device_name);
case float[] val:
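The new coercion block matters when the literal's CLR type and the requested dtype disagree; a hedged walk-through of what the code above does:

// convert_to_eager_tensor(1, ctx, TF_DataType.TF_FLOAT)
//   -> value is a boxed System.Int32 but the requested dtype is TF_FLOAT
//   -> Convert.ToSingle(1) rewrites the box as 1.0f
//   -> the value then matches the "case float val" branch and becomes a float EagerTensor.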


+ 1
- 0
src/TensorFlowNET.Core/Tensors/dtypes.cs View File

@@ -202,6 +202,7 @@ namespace Tensorflow
TF_DataType.TF_INT32 => "int32",
TF_DataType.TF_FLOAT => "float32",
TF_DataType.TF_BOOL => "bool",
TF_DataType.TF_RESOURCE => "resource",
_ => type.ToString()
};




+ 4
- 0
src/TensorFlowNET.Core/Tensors/tensor_util.cs View File

@@ -19,6 +19,7 @@ using System;
using System.Linq;
using NumSharp.Utilities;
using System.Text;
using Tensorflow.Eager;


namespace Tensorflow
{
@@ -39,6 +40,9 @@ namespace Tensorflow
/// <returns></returns>
public static NDArray constant_value(Tensor tensor, bool partial = false)
{
if (tensor is EagerTensor)
return tensor.numpy();

NDArray ret = _ConstantValue(tensor, partial);
if (!(ret is null))
tensor.graph.prevent_feeding(tensor);


+ 1
- 0
src/TensorFlowNET.Core/Tensors/tf.constant.cs View File

@@ -15,6 +15,7 @@
******************************************************************************/


using NumSharp;
using Tensorflow.Eager;


namespace Tensorflow
{


+ 1
- 1
src/TensorFlowNET.Core/Training/AdamOptimizer.cs View File

@@ -111,7 +111,7 @@ namespace Tensorflow.Train


protected override void _create_slots(RefVariable[] var_list)
{
var first_var = var_list.OrderBy(x => x.name).First();
var first_var = var_list.OrderBy(x => x.Name).First();
_create_non_slot_variable(initial_value: _beta1, name: "beta1_power", colocate_with: first_var);
_create_non_slot_variable(initial_value: _beta2, name: "beta2_power", colocate_with: first_var);




+ 10
- 10
src/TensorFlowNET.Core/Training/Optimizer.cs View File

@@ -44,7 +44,7 @@ namespace Tensorflow
public Tensor LearningRateTensor => _lr_t;
public bool _use_locking;
public Dictionary<string, Dictionary<string, RefVariable>> _slots;
public Dictionary<string, VariableV1> _non_slot_dict;
public Dictionary<string, IVariableV1> _non_slot_dict;
public Dictionary<string, object> _deferred_slot_restorations;
SlotCreator slot_creator = new SlotCreator();


@@ -58,7 +58,7 @@ namespace Tensorflow
_lr = learning_rate;
// Dictionary of slots.
_slots = new Dictionary<string, Dictionary<string, RefVariable>>();
_non_slot_dict = new Dictionary<string, VariableV1>();
_non_slot_dict = new Dictionary<string, IVariableV1>();
_deferred_slot_restorations = new Dictionary<string, object>();
}


@@ -72,7 +72,7 @@ namespace Tensorflow
_lr_t = learning_rate;
// Dictionary of slots.
_slots = new Dictionary<string, Dictionary<string, RefVariable>>();
_non_slot_dict = new Dictionary<string, VariableV1>();
_non_slot_dict = new Dictionary<string, IVariableV1>();
_deferred_slot_restorations = new Dictionary<string, object>();
}


@@ -122,7 +122,7 @@ namespace Tensorflow
var vars_with_grad = grads_and_vars.Where(x => x.Item1 != null).Select(x => x.Item2).ToArray();
if (vars_with_grad.Length == 0)
throw new ValueError($"No gradients provided for any variable, check your graph for ops" +
$" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.name))} and loss {loss}.");
$" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.Name))} and loss {loss}.");


return apply_gradients(grads_and_vars, global_step:global_step, name:name);
}
@@ -175,7 +175,7 @@ namespace Tensorflow
if (grad == null)
continue;


var scope_name = var.op.name;
var scope_name = var.Op.name;
tf_with(ops.name_scope("update_" + scope_name), scope2 =>
{
var op = processor.update_op(this, grad);
@@ -241,10 +241,10 @@ namespace Tensorflow
/// <param name="initial_value"></param>
/// <param name="name"></param>
/// <param name="colocate_with"></param>
protected VariableV1 _create_non_slot_variable(float initial_value, string name, RefVariable colocate_with)
protected IVariableV1 _create_non_slot_variable(float initial_value, string name, RefVariable colocate_with)
{
// Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
var graph = colocate_with.graph;
var graph = colocate_with.Graph;
var key = $"{name}.{graph.graph_key}";
var v = _non_slot_dict.ContainsKey(key) ? _non_slot_dict[key] : null;
if(v == null)
@@ -333,10 +333,10 @@ namespace Tensorflow


private string _var_key(RefVariable var)
{
return $"{var.op.graph.graph_key}.{var.op.name}";
return $"{var.Op.graph.graph_key}.{var.Op.name}";
}


protected VariableV1 _get_non_slot_variable(string name, Graph graph = null)
protected IVariableV1 _get_non_slot_variable(string name, Graph graph = null)
{
var key = $"{name}.{graph.graph_key}";
var non_slot = _non_slot_dict.ContainsKey(key) ? _non_slot_dict[key] : null;
@@ -385,7 +385,7 @@ namespace Tensorflow
case List<RefVariable> values:
var_list = values.Concat(vars).ToList();
break;
case List<VariableV1> values:
case List<IVariableV1> values:
var_list = values.Select(x => x as RefVariable).Concat(vars).ToList();
break;
}


+ 1
- 1
src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs View File

@@ -79,7 +79,7 @@ namespace Tensorflow
return gen_io_ops.restore_v2(filename_tensor, names.ToArray(), slices.ToArray(), dtypes.ToArray());
}


public virtual SaverDef _build_internal(VariableV1[] names_to_saveables,
public virtual SaverDef _build_internal(IVariableV1[] names_to_saveables,
bool reshape = false,
bool sharded = false,
int max_to_keep = 5,


+ 1
- 1
src/TensorFlowNET.Core/Training/Saving/ISaverBuilder.cs View File

@@ -22,7 +22,7 @@ namespace Tensorflow


Tensor[] bulk_restore(Tensor filename_tensor, SaveableObject[] saveables, int preferred_shard, bool restore_sequentially);


SaverDef _build_internal(VariableV1[] names_to_saveables,
SaverDef _build_internal(IVariableV1[] names_to_saveables,
bool reshape = false,
bool sharded = false,
int max_to_keep = 5,


+ 2
- 2
src/TensorFlowNET.Core/Training/Saving/Saver.cs View File

@@ -29,7 +29,7 @@ namespace Tensorflow
/// </summary>
public class Saver
{
private VariableV1[] _var_list;
private IVariableV1[] _var_list;
private bool _reshape;
private bool _sharded;
private int _max_to_keep;
@@ -50,7 +50,7 @@ namespace Tensorflow
private Dictionary<string, float> _last_checkpoints;
private Dictionary<string, float> _checkpoints_to_be_deleted;


public Saver(VariableV1[] var_list = null,
public Saver(IVariableV1[] var_list = null,
bool reshape = false,
bool sharded = false,
int max_to_keep = 5,


+ 5
- 5
src/TensorFlowNET.Core/Training/Saving/saveable_object_util.py.cs View File

@@ -28,7 +28,7 @@ namespace Tensorflow
/// </summary>
/// <param name="names_to_saveables"></param>
/// <returns></returns>
public static SaveableObject[] validate_and_slice_inputs(VariableV1[] names_to_saveables)
public static SaveableObject[] validate_and_slice_inputs(IVariableV1[] names_to_saveables)
{
var names_to_saveables_dict = op_list_to_dict(names_to_saveables);
var saveables = new List<SaveableObject>();
@@ -76,9 +76,9 @@ namespace Tensorflow
}
}


public static Dictionary<string, Tensor> op_list_to_dict(VariableV1[] op_list, bool convert_variable_to_tensor = true)
public static Dictionary<string, Tensor> op_list_to_dict(IVariableV1[] op_list, bool convert_variable_to_tensor = true)
{
op_list = op_list.OrderBy(x => x.name).ToArray();
op_list = op_list.OrderBy(x => x.Name).ToArray();
var names_to_saveables = new Dictionary<string, Tensor>();


foreach(var var in op_list)
@@ -103,7 +103,7 @@ namespace Tensorflow
if (convert_variable_to_tensor)
{
if (var is ResourceVariable)
tensor = var.graph_element;
tensor = var.GraphElement;
else
tensor = ops.internal_convert_to_tensor(var, as_ref: true);
}
@@ -111,7 +111,7 @@ namespace Tensorflow
if (tensor.op.type == "ReadVariableOp")
name = tensor.op.inputs[0].op.name;
else
name = var.op.name;
name = var.Op.name;


if (names_to_saveables.ContainsKey(name))
throw new ValueError($"At least two variables have the same name: {name}");


+ 2
- 2
src/TensorFlowNET.Core/Training/Saving/saver.py.cs View File

@@ -53,7 +53,7 @@ namespace Tensorflow
/// <returns></returns>
public static Saver _create_saver_from_imported_meta_graph(MetaGraphDef meta_graph_def,
string import_scope,
Dictionary<string, VariableV1> imported_vars)
Dictionary<string, IVariableV1> imported_vars)
{
if(meta_graph_def.SaverDef != null)
{
@@ -64,7 +64,7 @@
{
var sample_key = var_names[0];
var sample_var = imported_vars[sample_key];
scope = string.Join("", sample_var.name.Skip(sample_key.Length));
scope = string.Join("", sample_var.Name.Skip(sample_key.Length));
}
return new Saver(saver_def: meta_graph_def.SaverDef, name: scope);
}


+ 3
- 3
src/TensorFlowNET.Core/Training/SlotCreator.cs View File

@@ -33,7 +33,7 @@ namespace Tensorflow.Train
public RefVariable create_slot(RefVariable primary, Tensor val, string name, bool colocate_with_primary = true)
{
var validate_shape = val.TensorShape.is_fully_defined();
var prefix = primary.op.name;
var prefix = primary.Op.name;
return tf_with(tf.variable_scope(name: null, prefix + "/" + name), delegate
{
return _create_slot_var(primary, val, "", validate_shape, null, TF_DataType.DtInvalid);
@@ -74,7 +74,7 @@ namespace Tensorflow.Train
TF_DataType dtype, string name, bool colocate_with_primary = true)
{
var validate_shape = shape.is_fully_defined();
var prefix = primary.op.name;
var prefix = primary.Op.name;
return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate
{
return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype);
@@ -91,7 +91,7 @@ namespace Tensorflow.Train
/// <param name="shape"></param>
/// <param name="dtype"></param>
/// <returns></returns>
private RefVariable _create_slot_var(VariableV1 primary, object val, string scope, bool validate_shape,
private RefVariable _create_slot_var(IVariableV1 primary, object val, string scope, bool validate_shape,
TensorShape shape, TF_DataType dtype)
{
bool use_resource = primary is ResourceVariable;


+ 18
- 7
src/TensorFlowNET.Core/Training/Trackable.cs View File

@@ -15,6 +15,7 @@
******************************************************************************/


using System;
using static Tensorflow.Binding;


namespace Tensorflow.Train
{
@@ -26,16 +27,26 @@ namespace Tensorflow.Train
/// Restore-on-create for a variable be saved with this `Checkpointable`.
/// </summary>
/// <returns></returns>
protected virtual VariableV1 _add_variable_with_custom_getter(string name,
protected virtual IVariableV1 _add_variable_with_custom_getter(string name,
int[] shape,
TF_DataType dtype = TF_DataType.TF_FLOAT,
IInitializer initializer = null,
Func<string, int[], TF_DataType, IInitializer, bool, VariableV1> getter = null,
Func<string, int[], TF_DataType, IInitializer, bool, IVariableV1> getter = null,
bool overwrite = false,
bool trainable = false)
bool trainable = false,
bool use_resource = false,
VariableSynchronization synchronization = VariableSynchronization.Auto,
VariableAggregation aggregation = VariableAggregation.None)
{
var checkpoint_initializer = true;
var new_variable = getter(name, shape, dtype, initializer, trainable);
ops.init_scope();
IInitializer checkpoint_initializer = null;
if (tf.context.executing_eagerly())
;
else
checkpoint_initializer = null;

IVariableV1 new_variable;
new_variable = getter(name, shape, dtype, initializer, trainable);


// If we set an initializer and the variable processed it, tracking will not
// assign again. It will add this variable to our dependencies, and if there
@@ -53,13 +64,13 @@ namespace Tensorflow.Train
/// </summary>
/// <param name="name"></param>
/// <param name="trackable"></param>
protected void _handle_deferred_dependencies(string name, VariableV1 trackable)
protected void _handle_deferred_dependencies(string name, IVariableV1 trackable)
{
_maybe_initialize_trackable();
// TODO
}


protected VariableV1 _track_checkpointable(VariableV1 checkpointable, string name, bool overwrite = false)
protected IVariableV1 _track_checkpointable(IVariableV1 checkpointable, string name, bool overwrite = false)
{
return checkpointable;
}


+ 1
- 1
src/TensorFlowNET.Core/Training/TrainingUtil.cs View File

@@ -62,7 +62,7 @@ namespace Tensorflow.Train


var g = graph.as_default();
g.name_scope(null);
g.name_scope(global_step_tensor.op.name + "/");
g.name_scope(global_step_tensor.Op.name + "/");
// using initialized_value to ensure that global_step is initialized before
// this run. This is needed for example Estimator makes all model_fn build
// under global_step_read_tensor dependency.


src/TensorFlowNET.Core/Training/gen_training_ops.py.cs → src/TensorFlowNET.Core/Training/gen_training_ops.cs View File

@@ -14,6 +14,11 @@
limitations under the License.
******************************************************************************/


using System;
using System.Linq;
using Tensorflow.Eager;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class gen_training_ops
@@ -55,5 +60,33 @@ namespace Tensorflow


return _op.outputs[0];
}

public static Operation resource_apply_gradient_descent(EagerTensor var, EagerTensor alpha, EagerTensor delta, bool use_locking = false, string name = null)
{
if (tf.context.executing_eagerly())
{
using Status status = new Status(c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ResourceApplyGradientDescent", name, new IntPtr[]
{
var,
alpha,
delta
}, 3,
op => wrap_tfe_src.SetOpAttrs(op, "use_locking", use_locking),
null, 0));
status.Check(true);
return null;
}

var _op = _op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name, new
{
var,
alpha,
delta,
use_locking
});

return _op.outputs[0];
}
}
}
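For orientation, the op wrapped here performs a plain SGD step in place on the resource variable, which is why the eager branch checks only the Status and returns null:

// ResourceApplyGradientDescent, in pseudocode:
//   var -= alpha * delta;    // in-place update of the variable's buffer, no output tensor
// Eager path: TFE_FastPathExecute runs the kernel and status.Check(true) surfaces any error;
// graph path: the op node is built through _apply_op_helper exactly as before.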

+ 50
- 0
src/TensorFlowNET.Core/Util/BindingArray.cs View File

@@ -0,0 +1,50 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Runtime.InteropServices;

namespace Tensorflow
{
public class BindingArray : DisposableObject
{
TF_BindingArray data;
public IntPtr Address => data.array;
public int Length => data.length;

public BindingArray(IntPtr handle) : base(handle)
{
if (_handle != IntPtr.Zero)
data = Marshal.PtrToStructure<TF_BindingArray>(_handle);
else
data = default;
}

public static implicit operator BindingArray(IntPtr handle)
=> new BindingArray(handle);

public unsafe IntPtr this[int index]
=> data[index];

public unsafe IntPtr[] Data
=> data.Data;

protected override void DisposeUnmanagedResources(IntPtr handle)
{
c_api.TF_DeleteBindingArray(_handle);
}
}
}

+ 50
- 0
src/TensorFlowNET.Core/Util/BindingTensorArray.cs View File

@@ -0,0 +1,50 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Runtime.InteropServices;

namespace Tensorflow
{
public class BindingTensorArray : DisposableObject
{
TF_BindingArray data;
public IntPtr Address => data.array;
public int Length => data.length;

public BindingTensorArray(IntPtr handle) : base(handle)
{
if (_handle != IntPtr.Zero)
data = Marshal.PtrToStructure<TF_BindingArray>(_handle);
else
data = default;
}

public static implicit operator BindingTensorArray(IntPtr handle)
=> new BindingTensorArray(handle);

public unsafe IntPtr this[int index]
=> data[index];

public unsafe IntPtr[] Data
=> data.Data;

protected override void DisposeUnmanagedResources(IntPtr handle)
{
c_api.TFE_DeleteBindingTensorArray(_handle);
}
}
}

Some files were not shown because too many files changed in this diff
