Browse Source

Merge branch 'master' into v0.20-tensorflow2.0

tags/v0.20
Oceania2018 5 years ago
parent
commit
0c2ddfe36f
100 changed files with 3102 additions and 296 deletions
  1. +1
    -0
      .gitignore
  2. +3
    -3
      README.md
  3. +79
    -11
      TensorFlow.NET.sln
  4. +2
    -1
      docs/source/ImageRecognition.md
  5. +5
    -0
      docs/source/MnistInRnn.md
  6. +13
    -1
      src/TensorFlowNET.Core/APIs/tf.data_flow.cs
  7. +1
    -1
      src/TensorFlowNET.Core/APIs/tf.graph.cs
  8. +2
    -1
      src/TensorFlowNET.Core/APIs/tf.io.cs
  9. +2
    -1
      src/TensorFlowNET.Core/APIs/tf.layers.cs
  10. +5
    -4
      src/TensorFlowNET.Core/APIs/tf.math.cs
  11. +4
    -0
      src/TensorFlowNET.Core/APIs/tf.nn.cs
  12. +35
    -0
      src/TensorFlowNET.Core/APIs/tf.scan.cs
  13. +6
    -0
      src/TensorFlowNET.Core/APIs/tf.train.cs
  14. +0
    -3
      src/TensorFlowNET.Core/Binding.FuncTools.cs
  15. +4
    -1
      src/TensorFlowNET.Core/Binding.Util.cs
  16. +52
    -4
      src/TensorFlowNET.Core/Framework/importer.cs
  17. +13
    -1
      src/TensorFlowNET.Core/Framework/meta_graph.cs
  18. +10
    -0
      src/TensorFlowNET.Core/Framework/tensor_shape.cs
  19. +27
    -0
      src/TensorFlowNET.Core/Gradients/array_grad.cs
  20. +73
    -4
      src/TensorFlowNET.Core/Gradients/math_grad.cs
  21. +31
    -26
      src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs
  22. +46
    -0
      src/TensorFlowNET.Core/GraphTransformation/GraphTransformer.cs
  23. +33
    -0
      src/TensorFlowNET.Core/GraphTransformation/c_api.transform_graph.cs
  24. +2
    -2
      src/TensorFlowNET.Core/Graphs/Graph.Import.cs
  25. +6
    -0
      src/TensorFlowNET.Core/Graphs/Graph.cs
  26. +54
    -5
      src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs
  27. +7
    -0
      src/TensorFlowNET.Core/Interfaces/IFromMergeVars.cs
  28. +2
    -1
      src/TensorFlowNET.Core/Keras/Layers/Dense.cs
  29. +10
    -3
      src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs
  30. +4
    -2
      src/TensorFlowNET.Core/Layers/Layer.cs
  31. +5
    -7
      src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs
  32. +2
    -0
      src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs
  33. +172
    -0
      src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
  34. +2
    -1
      src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
  35. +13
    -1
      src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs
  36. +35
    -0
      src/TensorFlowNET.Core/Operations/NnOps/LSTMStateTuple.cs
  37. +0
    -0
      src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs
  38. +18
    -14
      src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs
  39. +149
    -2
      src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
  40. +0
    -4
      src/TensorFlowNET.Core/Operations/Operation.Control.cs
  41. +25
    -0
      src/TensorFlowNET.Core/Operations/array_ops.cs
  42. +45
    -0
      src/TensorFlowNET.Core/Operations/clip_ops.cs
  43. +1
    -1
      src/TensorFlowNET.Core/Operations/control_flow_ops.cs
  44. +67
    -0
      src/TensorFlowNET.Core/Operations/ctc_ops.cs
  45. +238
    -0
      src/TensorFlowNET.Core/Operations/functional_ops.cs
  46. +1
    -1
      src/TensorFlowNET.Core/Operations/gen_array_ops.cs
  47. +38
    -0
      src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
  48. +13
    -0
      src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
  49. +0
    -0
      src/TensorFlowNET.Core/Operations/gen_image_ops.cs
  50. +1
    -1
      src/TensorFlowNET.Core/Operations/gen_io_ops.cs
  51. +110
    -10
      src/TensorFlowNET.Core/Operations/map_fn.cs
  52. +18
    -4
      src/TensorFlowNET.Core/Operations/math_ops.cs
  53. +1
    -1
      src/TensorFlowNET.Core/Operations/random_ops.cs
  54. +10
    -2
      src/TensorFlowNET.Core/Sessions/BaseSession.cs
  55. +1
    -2
      src/TensorFlowNET.Core/Sessions/Session.cs
  56. +0
    -33
      src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs
  57. +3
    -0
      src/TensorFlowNET.Core/Sessions/_FetchHandler.cs
  58. +1
    -3
      src/TensorFlowNET.Core/Status/Status.cs
  59. +11
    -11
      src/TensorFlowNET.Core/TensorFlow.Binding.csproj
  60. +6
    -0
      src/TensorFlowNET.Core/Tensors/Dimension.cs
  61. +411
    -0
      src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs
  62. +0
    -14
      src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs
  63. +2
    -3
      src/TensorFlowNET.Core/Tensors/Tensor.cs
  64. +1
    -1
      src/TensorFlowNET.Core/Tensors/TensorShape.cs
  65. +1
    -1
      src/TensorFlowNET.Core/Tensors/dtypes.cs
  66. +2
    -1
      src/TensorFlowNET.Core/Tensors/tensor_util.cs
  67. +8
    -6
      src/TensorFlowNET.Core/Training/Saving/Saver.cs
  68. +20
    -21
      src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs
  69. +36
    -3
      src/TensorFlowNET.Core/Training/Saving/saver.py.cs
  70. +0
    -8
      src/TensorFlowNET.Core/Util/nest.py.cs
  71. +5
    -0
      src/TensorFlowNET.Core/Variables/VariableScope.cs
  72. +1
    -0
      src/TensorFlowNET.Core/Variables/_ReuseMode.cs
  73. +0
    -1
      src/TensorFlowNET.Core/Variables/variables.py.cs
  74. +0
    -60
      src/TensorFlowNET.Core/ops.cs
  75. +152
    -0
      src/TensorFlowNET.Core/ops.threading.cs
  76. +7
    -4
      src/TensorFlowNET.Core/tensorflow.cs
  77. +53
    -0
      src/TensorFlowNET.Core/tensorflow.threading.cs
  78. +13
    -0
      src/TensorFlowNET.Hub/DataSetBase.cs
  79. +46
    -0
      src/TensorFlowNET.Hub/Datasets.cs
  80. +13
    -0
      src/TensorFlowNET.Hub/IDataSet.cs
  81. +14
    -0
      src/TensorFlowNET.Hub/IModelLoader.cs
  82. +88
    -0
      src/TensorFlowNET.Hub/MnistDataSet.cs
  83. +184
    -0
      src/TensorFlowNET.Hub/MnistModelLoader.cs
  84. +20
    -0
      src/TensorFlowNET.Hub/ModelLoadSetting.cs
  85. +5
    -0
      src/TensorFlowNET.Hub/README.md
  86. +27
    -0
      src/TensorFlowNET.Hub/Tensorflow.Hub.csproj
  87. +137
    -0
      src/TensorFlowNET.Hub/Utils.cs
  88. +10
    -0
      src/TensorFlowNET.Keras/Activations.cs
  89. +35
    -0
      src/TensorFlowNET.Keras/Applications/Densenet.cs
  90. +60
    -0
      src/TensorFlowNET.Keras/Applications/Efficientnet.cs
  91. +22
    -0
      src/TensorFlowNET.Keras/Applications/ImagenetUtils.cs
  92. +22
    -0
      src/TensorFlowNET.Keras/Applications/InceptionResnetV2.cs
  93. +19
    -0
      src/TensorFlowNET.Keras/Applications/InceptionV3.cs
  94. +18
    -0
      src/TensorFlowNET.Keras/Applications/Mobilenet.cs
  95. +21
    -0
      src/TensorFlowNET.Keras/Applications/MobilenetV2.cs
  96. +31
    -0
      src/TensorFlowNET.Keras/Applications/Nasnet.cs
  97. +41
    -0
      src/TensorFlowNET.Keras/Applications/Resnet.cs
  98. +25
    -0
      src/TensorFlowNET.Keras/Applications/ResnetV2.cs
  99. +17
    -0
      src/TensorFlowNET.Keras/Applications/Vgg16.cs
  100. +17
    -0
      src/TensorFlowNET.Keras/Applications/Vgg19.cs

+ 1
- 0
.gitignore View File

@@ -336,3 +336,4 @@ test/TensorFlowNET.Examples/mnist

# training model resources
.resources
/redist

+ 3
- 3
README.md View File

@@ -29,10 +29,10 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr

| TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.0 |
| ----------- | ------- | ------- | ------- | ------ |
| tf.net 0.12 | | x | | |
| tf.net 0.14 | | x | x | |
| tf.net 0.13 | | x | x | |
| tf.net 0.12 | x | x | | |
| tf.net 0.11 | x | x | | |
| tf.net 0.10 | x | x | | |
| tf.net 0.9 | x | | | |

Install TF.NET and TensorFlow binary through NuGet.
```sh


+ 79
- 11
TensorFlow.NET.sln View File

@@ -3,16 +3,24 @@ Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.29102.190
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Binding", "src\TensorFlowNET.Core\TensorFlow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Benchmark", "src\TensorFlowNet.Benchmarks\Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}"
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "UnitTest", "test\TensorFlowNET.UnitTest\UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
Debug-Minimal|Any CPU = Debug-Minimal|Any CPU
Debug-Minimal|x64 = Debug-Minimal|x64
Publish|Any CPU = Publish|Any CPU
Publish|x64 = Publish|x64
Release|Any CPU = Release|Any CPU
@@ -23,6 +31,10 @@ Global
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU
@@ -35,10 +47,14 @@ Global
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU
@@ -47,14 +63,66 @@ Global
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE


+ 2
- 1
docs/source/ImageRecognition.md View File

@@ -133,4 +133,5 @@ grace_hopper.jpg: 466 bulletproof vest, 0.005350832
2/18/2019 3:56:25 AM Completed InceptionArchGoogLeNet
```

You can find the full source code from [github](https://github.com/SciSharp/TensorFlow.NET/tree/master/test/TensorFlowNET.Examples/ImageProcess).
You can find the full source code from [github](https://github.com/SciSharp/TensorFlow.NET-Examples/tree/master/src/TensorFlowNET.Examples/ImageProcessing).


+ 5
- 0
docs/source/MnistInRnn.md View File

@@ -0,0 +1,5 @@
# Chapter. MNIST In RNN

### Recurrent Neural Networks

Recurrent Neural Networks (RNNs) are popular models that have shown great promise in sequential data classification tasks. A traditional neural network model cannot make its next prediction based on the knowledge it has learned from earlier inputs.

+ 13
- 1
src/TensorFlowNET.Core/APIs/tf.data_flow.cs View File

@@ -27,7 +27,19 @@ namespace Tensorflow
/// <param name="data"></param>
/// <param name="name"></param>
/// <returns></returns>
public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
public Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
=> gen_data_flow_ops.dynamic_stitch(indices, data, name: name);

/// <summary>
/// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
/// </summary>
/// <param name="data"></param>
/// <param name="partitions"></param>
/// <param name="num_partitions">The number of partitions to output.</param>
/// <param name="name"></param>
/// <returns></returns>
public Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions,
string name = null)
=> gen_data_flow_ops.dynamic_partition(data, partitions, num_partitions, name: name);
}
}

+ 1
- 1
src/TensorFlowNET.Core/APIs/tf.graph.cs View File

@@ -21,7 +21,7 @@ namespace Tensorflow
public partial class tensorflow
{
public graph_util_impl graph_util => new graph_util_impl();
public GraphTransformer graph_transforms => new GraphTransformer();
public GraphKeys GraphKeys { get; } = new GraphKeys();

public void reset_default_graph()


+ 2
- 1
src/TensorFlowNET.Core/APIs/tf.io.cs View File

@@ -23,8 +23,9 @@ namespace Tensorflow
{
public GFile gfile = new GFile();
public Tensor read_file(string filename, string name = null) => gen_io_ops.read_file(filename, name);
public Tensor read_file(Tensor filename, string name = null) => gen_io_ops.read_file(filename, name);

public void import_graph_def(GraphDef graph_def,
public ITensorOrOperation[] import_graph_def(GraphDef graph_def,
Dictionary<string, Tensor> input_map = null,
string[] return_elements = null,
string name = null,


+ 2
- 1
src/TensorFlowNET.Core/APIs/tf.layers.cs View File

@@ -177,7 +177,8 @@ namespace Tensorflow
use_bias: use_bias,
bias_initializer: bias_initializer,
kernel_initializer: kernel_initializer,
trainable: trainable);
trainable: trainable,
name: name);

return layer.apply(inputs).Item1;
}


+ 5
- 4
src/TensorFlowNET.Core/APIs/tf.math.cs View File

@@ -251,10 +251,11 @@ namespace Tensorflow
/// greater than <c>clip_value_max</c> are set to <c>clip_value_max</c>.
/// </remarks>
public Tensor clip_by_value (Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue")
=> gen_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
=> clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
public Tensor sub<Tx, Ty>(Tx a, Ty b, string name = null)
=> gen_math_ops.sub(a, b, name: name);

public Tensor sub(Tensor a, Tensor b)
=> gen_math_ops.sub(a, b);

public Tensor divide(Tensor a, Tensor b)
=> gen_math_ops.real_div(a, b);
@@ -474,7 +475,7 @@ namespace Tensorflow
public Tensor reduce_mean(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
=> math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices);

public Tensor reduce_mean(Tensor[] input_tensors, int axis, bool keepdims = false, string name = null)
public Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_mean(input_tensors, axis: axis, keepdims: keepdims, name: name);

public Tensor round(Tensor x, string name = null)


+ 4
- 0
src/TensorFlowNET.Core/APIs/tf.nn.cs View File

@@ -46,6 +46,9 @@ namespace Tensorflow
return gen_nn_ops.conv2d(parameters);
}

public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null)
=> gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated: merge_repeated, name: name);

/// <summary>
/// Computes dropout.
/// </summary>
@@ -112,6 +115,7 @@ namespace Tensorflow

public IActivation relu() => new relu();
public IActivation swish() => new swish();
public IActivation tanh() => new tanh();

public Tensor relu(Tensor features, string name = null) => gen_nn_ops.relu(features, name);



+ 35
- 0
src/TensorFlowNET.Core/APIs/tf.scan.cs View File

@@ -0,0 +1,35 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;

namespace Tensorflow
{
public partial class tensorflow
{
public Tensor scan(
Func<Tensor, Tensor, Tensor> fn,
Tensor elems,
Tensor initializer = null,
int parallel_iterations = 10,
bool back_prop = true,
bool swap_memory = false,
bool infer_shape = true,
bool reverse = false,
string name = null) => functional_ops.scan(fn, elems, initializer, parallel_iterations, back_prop,
swap_memory, infer_shape, reverse, name);
}
}

+ 6
- 0
src/TensorFlowNET.Core/APIs/tf.train.cs View File

@@ -53,6 +53,12 @@ namespace Tensorflow
public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
=> graph_io.write_graph(graph, logdir, name, as_text);

public Graph load_graph(string freeze_graph_pb)
=> saver.load_graph(freeze_graph_pb);

public string freeze_graph(string checkpoint_dir, string output_pb_name, string[] output_node_names)
=> saver.freeze_graph(checkpoint_dir, output_pb_name, output_node_names);

public Saver import_meta_graph(string meta_graph_or_file,
bool clear_devices = false,
string import_scope = "") => saver._import_meta_graph_with_return_elements(meta_graph_or_file,


+ 0
- 3
src/TensorFlowNET.Core/Binding.FuncTools.cs View File

@@ -16,9 +16,6 @@ namespace Tensorflow
args = arg,
invoke = func
};

public static Func<Tin1, Tin2, Tout> partial<Tin1, Tin2, Tout>(Func<Tin1, Tin2, Tout> func, (Tin1, Tin2) args)
=> (arg1, arg2) => func(args.Item1, args.Item2);
}

public class PartialFunc<Tin, Tout>


+ 4
- 1
src/TensorFlowNET.Core/Binding.Util.cs View File

@@ -88,13 +88,16 @@ namespace Tensorflow
case ICollection arr:
return arr.Count;
case NDArray ndArray:
return ndArray.shape[0];
return ndArray.ndim == 0 ? 1 : ndArray.shape[0];
case IEnumerable enumerable:
return enumerable.OfType<object>().Count();
}
throw new NotImplementedException("len() not implemented for type: " + a.GetType());
}

public static float min(float a, float b)
=> Math.Min(a, b);

public static T[] list<T>(IEnumerable<T> list)
=> list.ToArray();



src/TensorFlowNET.Core/Framework/importer.py.cs → src/TensorFlowNET.Core/Framework/importer.cs View File

@@ -54,6 +54,7 @@ namespace Tensorflow
input_map = _ConvertInputMapValues(name, input_map);
});

TF_ImportGraphDefResults results = null;
var bytes = graph_def.ToByteString().ToArray();
using (var buffer = c_api_util.tf_buffer(bytes))
using (var scoped_options = c_api_util.ScopedTFImportGraphDefOptions())
@@ -61,9 +62,8 @@ namespace Tensorflow
{
_PopulateTFImportGraphDefOptions(scoped_options, prefix, input_map, return_elements);
// need to create a class ImportGraphDefWithResults with IDisposal
var results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status);
results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status);
status.Check(true);
c_api.TF_DeleteImportGraphDefResults(results);
}

_ProcessNewOps(graph);
@@ -71,7 +71,34 @@ namespace Tensorflow
if (return_elements == null)
return null;
else
throw new NotImplementedException("import_graph_def return_elements");
return _GatherReturnElements(return_elements, graph, results);
}

private static ITensorOrOperation[] _GatherReturnElements(string[] requested_return_elements,
Graph graph,
TF_ImportGraphDefResults results)
{
var return_outputs = results.return_tensors;
var return_opers = results.return_opers;

var combined_return_elements = new List<ITensorOrOperation>();
int outputs_idx = 0;
int opers_idx = 0;
foreach(var name in requested_return_elements)
{
if (name.Contains(":"))
{
combined_return_elements.append(graph.get_tensor_by_tf_output(return_outputs[outputs_idx]));
outputs_idx += 1;
}
else
{
throw new NotImplementedException("_GatherReturnElements");
// combined_return_elements.append(graph._get_operation_by_tf_operation(return_opers[opers_idx]));
}
}

return combined_return_elements.ToArray();
}

private static void _ProcessNewOps(Graph graph)
@@ -100,8 +127,29 @@ namespace Tensorflow

foreach (var name in return_elements)
{
throw new NotImplementedException("_PopulateTFImportGraphDefOptions");
if(name.Contains(":"))
{
var (op_name, index) = _ParseTensorName(name);
c_api.TF_ImportGraphDefOptionsAddReturnOutput(options, op_name, index);
}
else
{
c_api.TF_ImportGraphDefOptionsAddReturnOperation(options, name);
}
}

// c_api.TF_ImportGraphDefOptionsSetValidateColocationConstraints(options, validate_colocation_constraints);
}

private static (string, int) _ParseTensorName(string tensor_name)
{
var components = tensor_name.Split(':');
if (components.Length == 2)
return (components[0], int.Parse(components[1]));
else if (components.Length == 1)
return (components[0], 0);
else
throw new ValueError($"Cannot convert {tensor_name} to a tensor name.");
}

public static Dictionary<string, Tensor> _ConvertInputMapValues(string name, Dictionary<string, Tensor> input_map)

+ 13
- 1
src/TensorFlowNET.Core/Framework/meta_graph.cs View File

@@ -142,7 +142,8 @@ namespace Tensorflow

break;
default:
throw new NotImplementedException("import_scoped_meta_graph_with_return_elements");
Console.WriteLine($"Cannot identify data type for collection {col.Key}. Skipping.");
break;
}
}

@@ -267,6 +268,17 @@ namespace Tensorflow

switch (graph.get_collection(key))
{
case List<VariableV1> collection_list:
col_def.BytesList = new Types.BytesList();
foreach (var x in collection_list)
{
if(x is RefVariable x_ref_var)
{
var proto = x_ref_var.to_proto(export_scope);
col_def.BytesList.Value.Add(proto.ToByteString());
}
}
break;
case List<RefVariable> collection_list:
col_def.BytesList = new Types.BytesList();
foreach (var x in collection_list)


+ 10
- 0
src/TensorFlowNET.Core/Framework/tensor_shape.cs View File

@@ -24,6 +24,16 @@ namespace Tensorflow.Framework
}
}

public static Dimension dimension_at_index(TensorShape shape, int index)
{
return shape.rank < 0 ?
new Dimension(-1) :
new Dimension(shape.dims[index]);
}

public static int dimension_value(Dimension dimension)
=> dimension.value;

public static TensorShape as_shape(this Shape shape)
=> new TensorShape(shape.Dimensions);
}


+ 27
- 0
src/TensorFlowNET.Core/Gradients/array_grad.cs View File

@@ -231,6 +231,33 @@ namespace Tensorflow.Gradients
return new Tensor[] { x_grad, null };
}

[RegisterGradient("Split")]
public static Tensor[] _SplitGrad(Operation op, Tensor[] grads)
{
return new Tensor[] { null, array_ops.concat(list(grads), op.inputs[0]) };
}

[RegisterGradient("Slice")]
public static Tensor[] _SliceGrad(Operation op, Tensor[] grads)
{
var grad = grads[0];
var input_vec = op.inputs[0];
var begin_vec = op.inputs[1];
var input_rank = array_ops.rank(input_vec);
var slice_size = array_ops.shape(op.outputs[0]);

var shape = array_ops.stack(new Tensor[] { input_rank, new Tensor(1) });
var before_pad = array_ops.reshape(begin_vec, shape);
var after_pad = array_ops.reshape(array_ops.shape(input_vec) - slice_size - begin_vec, shape);
var paddings = array_ops.concat(new Tensor[] { before_pad, after_pad }, 1);
return new Tensor[]
{
array_ops.pad(grad, paddings),
null,
null
};
}

[RegisterGradient("Squeeze")]
public static Tensor[] _SqueezeGrad(Operation op, Tensor[] grads)
{


+ 73
- 4
src/TensorFlowNET.Core/Gradients/math_grad.cs View File

@@ -319,7 +319,7 @@ namespace Tensorflow.Gradients
[RegisterGradient("Maximum")]
public static Tensor[] _MaximumGrad(Operation op, Tensor[] grads)
{
return _MaximumMinimumGrad(op, grads[0]);
return _MaximumMinimumGrad(true, op, grads[0]);
}

/// <summary>
@@ -331,7 +331,7 @@ namespace Tensorflow.Gradients
[RegisterGradient("Minimum")]
public static Tensor[] _MinimumGrad(Operation op, Tensor[] grads)
{
return _MaximumMinimumGrad(op, grads[0]);
return _MaximumMinimumGrad(false, op, grads[0]);
}

/// <summary>
@@ -340,7 +340,7 @@ namespace Tensorflow.Gradients
/// <param name="op"></param>
/// <param name="grad"></param>
/// <returns></returns>
private static Tensor[] _MaximumMinimumGrad(Operation op, Tensor grad)
private static Tensor[] _MaximumMinimumGrad(bool isMaximum, Operation op, Tensor grad)
{
var x = op.inputs[0];
var y = op.inputs[1];
@@ -349,7 +349,10 @@ namespace Tensorflow.Gradients
var sy = array_ops.shape(y);
var gradshape = array_ops.shape(grad);
var zeros = array_ops.zeros(gradshape, gdtype);
var xmask = gen_math_ops.greater_equal(x, y);
var xmask =
isMaximum
? gen_math_ops.greater_equal(x, y)
: gen_math_ops.less_equal(x, y);
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
var xgrad = array_ops.where(xmask, grad, zeros);
var ygrad = array_ops.where(xmask, zeros, grad);
@@ -512,6 +515,72 @@ namespace Tensorflow.Gradients
});
}

[RegisterGradient("Sqrt")]
public static Tensor[] _SqrtGrad(Operation op, Tensor[] grads)
{
    // Gradient of y = sqrt(x): dL/dx = dy * 0.5 / y, computed from the
    // op's own output so sqrt(x) is not re-evaluated.
    var dy = grads[0];
    var output = op.outputs[0];

    return tf_with(ops.control_dependencies(grads), delegate
    {
        output = math_ops.conj(output);
        var half = constant_op.constant(0.5f, dtype: output.dtype);
        var scale = half * math_ops.reciprocal(output);
        return new Tensor[] { dy * scale };
    });
}

[RegisterGradient("Sin")]
public static Tensor[] _SinGrad(Operation op, Tensor[] grads)
{
    // d/dx sin(x) = cos(x); the chain rule multiplies by the incoming gradient.
    var dy = grads[0];
    var input = op.inputs[0];

    return tf_with(ops.control_dependencies(grads), delegate
    {
        input = math_ops.conj(input);
        var cos_x = gen_math_ops.cos(input);
        return new Tensor[] { math_ops.multiply(dy, cos_x) };
    });
}

[RegisterGradient("Sinh")]
public static Tensor[] _SinhGrad(Operation op, Tensor[] grads)
{
    // d/dx sinh(x) = cosh(x).
    var dy = grads[0];
    var input = op.inputs[0];

    return tf_with(ops.control_dependencies(grads), delegate
    {
        input = math_ops.conj(input);
        var cosh_x = gen_math_ops.cosh(input);
        return new Tensor[] { math_ops.multiply(dy, cosh_x) };
    });
}

[RegisterGradient("Cos")]
public static Tensor[] _CosGrad(Operation op, Tensor[] grads)
{
    // d/dx cos(x) = -sin(x).
    var dy = grads[0];
    var input = op.inputs[0];

    return tf_with(ops.control_dependencies(grads), delegate
    {
        input = math_ops.conj(input);
        var neg_sin_x = -gen_math_ops.sin(input);
        return new Tensor[] { math_ops.multiply(dy, neg_sin_x) };
    });
}

[RegisterGradient("Cosh")]
public static Tensor[] _CoshGrad(Operation op, Tensor[] grads)
{
    // d/dx cosh(x) = sinh(x).
    var dy = grads[0];
    var input = op.inputs[0];

    return tf_with(ops.control_dependencies(grads), delegate
    {
        input = math_ops.conj(input);
        var sinh_x = gen_math_ops.sinh(input);
        return new Tensor[] { math_ops.multiply(dy, sinh_x) };
    });
}

[RegisterGradient("Tanh")]
public static Tensor[] _TanhGrad(Operation op, Tensor[] grads)
{


+ 31
- 26
src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs View File

@@ -26,31 +26,8 @@ namespace Tensorflow
{
static Dictionary<string, Func<Operation, Tensor[], Tensor[]>> gradientFunctions = null;

/// <summary>
/// Register new gradient function
/// </summary>
/// <param name="name">operation type</param>
/// <param name="func">function delegate</param>
public static void RegisterGradientFunction(string name, Func<Operation, Tensor[], Tensor[]> func)
private static void RegisterFromAssembly()
{
if(gradientFunctions == null)
gradientFunctions = new Dictionary<string, Func<Operation, Tensor[], Tensor[]>>();

gradientFunctions[name] = func;
}

public static void RegisterNoGradientFunction(string name)
{
if (gradientFunctions == null)
gradientFunctions = new Dictionary<string, Func<Operation, Tensor[], Tensor[]>>();

gradientFunctions[name] = null;
}

public static Func<Operation, Tensor[], Tensor[]> get_gradient_function(Operation op)
{
if (op.inputs == null) return null;

if (gradientFunctions == null)
{
gradientFunctions = new Dictionary<string, Func<Operation, Tensor[], Tensor[]>>();
@@ -62,7 +39,8 @@ namespace Tensorflow

foreach (var g in gradGroups)
{
var methods = g.GetMethods().Where(x => x.GetCustomAttribute<RegisterGradient>() != null)
var methods = g.GetMethods()
.Where(x => x.GetCustomAttribute<RegisterGradient>() != null)
.ToArray();

foreach (var m in methods)
@@ -78,13 +56,40 @@ namespace Tensorflow
}

// REGISTER_NO_GRADIENT_OP
methods = g.GetMethods().Where(x => x.GetCustomAttribute<RegisterNoGradient>() != null)
methods = g.GetMethods()
.Where(x => x.GetCustomAttribute<RegisterNoGradient>() != null)
.ToArray();

foreach (var m in methods)
RegisterNoGradientFunction(m.GetCustomAttribute<RegisterNoGradient>().Name);
}
}
}

/// <summary>
/// Register a new gradient function for an operation type.
/// Overwrites any gradient previously registered under the same name.
/// </summary>
/// <param name="name">operation type</param>
/// <param name="func">function delegate</param>
public static void RegisterGradientFunction(string name, Func<Operation, Tensor[], Tensor[]> func)
{
    // Ensure the attribute-scanned registrations exist before adding/overriding.
    RegisterFromAssembly();

    gradientFunctions[name] = func;
}

/// <summary>
/// Marks an operation type as having no gradient by registering a null entry,
/// mirroring TensorFlow's REGISTER_NO_GRADIENT_OP.
/// </summary>
/// <param name="name">operation type</param>
public static void RegisterNoGradientFunction(string name)
{
    // Ensure the attribute-scanned registrations exist before adding the null entry.
    RegisterFromAssembly();

    gradientFunctions[name] = null;
}

public static Func<Operation, Tensor[], Tensor[]> get_gradient_function(Operation op)
{
if (op.inputs == null) return null;

RegisterFromAssembly();

if (!gradientFunctions.ContainsKey(op.type))
throw new LookupError($"can't get graident function through get_gradient_function {op.type}");


+ 46
- 0
src/TensorFlowNET.Core/GraphTransformation/GraphTransformer.cs View File

@@ -0,0 +1,46 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Google.Protobuf;

namespace Tensorflow
{
    public class GraphTransformer
    {
        /// <summary>
        /// Graph Transform Tool
        /// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md
        /// </summary>
        /// <param name="input_graph_def">GraphDef object containing a model to be transformed</param>
        /// <param name="inputs">the model inputs</param>
        /// <param name="outputs">the model outputs</param>
        /// <param name="transforms">transform names and parameters</param>
        /// <returns>the transformed model parsed back into a new <see cref="GraphDef"/></returns>
        public GraphDef TransformGraph(GraphDef input_graph_def,
            string[] inputs,
            string[] outputs,
            string[] transforms)
        {
            // Serialize the graph and flatten the name lists into the
            // comma-/space-separated strings the native API expects.
            var input_graph_def_string = input_graph_def.ToByteArray();
            var inputs_string = string.Join(",", inputs);
            var outputs_string = string.Join(",", outputs);
            var transforms_string = string.Join(" ", transforms);
            using (var status = new Status())
            using (var buffer = new Buffer())  // dispose the native output buffer too
            {
                // The int return value (serialized length) is unused: the result
                // is read back directly from the output buffer.
                c_api.TransformGraphWithStringInputs(input_graph_def_string,
                    input_graph_def_string.Length,
                    inputs_string,
                    outputs_string,
                    transforms_string,
                    buffer,
                    status);

                status.Check(false);
                var bytes = buffer.ToArray();
                return GraphDef.Parser.ParseFrom(bytes);
            }
        }
    }
}

+ 33
- 0
src/TensorFlowNET.Core/GraphTransformation/c_api.transform_graph.cs View File

@@ -0,0 +1,33 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Runtime.InteropServices;

namespace Tensorflow
{
    public partial class c_api
    {
        /// <summary>
        /// Runs the Graph Transform Tool over a serialized GraphDef.
        /// </summary>
        /// <param name="graph_def_string">serialized GraphDef protobuf bytes</param>
        /// <param name="graph_def_string_len">length in bytes of <paramref name="graph_def_string"/></param>
        /// <param name="inputs_string">comma-separated input node names</param>
        /// <param name="outputs_string">comma-separated output node names</param>
        /// <param name="transforms_string">space-separated transform specifications</param>
        /// <param name="output_buffer">TF_Buffer* receiving the transformed GraphDef</param>
        /// <param name="status">TF_Status* for error reporting</param>
        [DllImport(TensorFlowLibName)]
        public static extern int TransformGraphWithStringInputs(byte[] graph_def_string,
            int graph_def_string_len,
            string inputs_string,
            string outputs_string,
            string transforms_string,
            IntPtr output_buffer,
            IntPtr status);
    }
}

+ 2
- 2
src/TensorFlowNET.Core/Graphs/Graph.Import.cs View File

@@ -60,12 +60,12 @@ namespace Tensorflow
}
}

/*public Graph Import(string file_path, string name = null)
public Graph ImportGraphDef(string file_path, string name = null)
{
as_default();
var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path));
importer.import_graph_def(graph_def, name: name);
return this;
}*/
}
}
}

+ 6
- 0
src/TensorFlowNET.Core/Graphs/Graph.cs View File

@@ -494,6 +494,12 @@ namespace Tensorflow
c_api.TF_DeleteGraph(handle);
}

/// <summary>
/// Resolves a native <see cref="TF_Output"/> handle to the managed
/// <see cref="Tensor"/> it refers to within this graph.
/// </summary>
public Tensor get_tensor_by_tf_output(TF_Output tf_output)
{
    // Look up the owning operation from its native handle, then pick the
    // output tensor at the index the TF_Output carries.
    var op = _get_operation_by_tf_operation(tf_output.oper);
    return op.outputs[tf_output.index];
}

/// <summary>
/// Returns the <see cref="Tensor"/> with the given <paramref name="name"/>.
/// This method may be called concurrently from multiple threads.


+ 54
- 5
src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs View File

@@ -3,13 +3,62 @@ using System.Runtime.InteropServices;

namespace Tensorflow
{
[StructLayout(LayoutKind.Sequential)]
public struct TF_ImportGraphDefResults
public class TF_ImportGraphDefResults : DisposableObject
{
public IntPtr return_tensors;
public IntPtr return_nodes;
/*public IntPtr return_nodes;
public IntPtr missing_unused_key_names;
public IntPtr missing_unused_key_indexes;
public IntPtr missing_unused_key_names_data;
public IntPtr missing_unused_key_names_data;*/

public TF_ImportGraphDefResults(IntPtr handle)
{
_handle = handle;
}

public TF_Output[] return_tensors
{
get
{
IntPtr return_output_handle = IntPtr.Zero;
int num_outputs = -1;
c_api.TF_ImportGraphDefResultsReturnOutputs(_handle, ref num_outputs, ref return_output_handle);
TF_Output[] return_outputs = new TF_Output[num_outputs];
unsafe
{
var tf_output_ptr = (TF_Output*)return_output_handle;
for (int i = 0; i < num_outputs; i++)
return_outputs[i] = *(tf_output_ptr + i);
return return_outputs;
}
}
}

public TF_Operation[] return_opers
{
get
{
return new TF_Operation[0];
/*TF_Operation return_output_handle = new TF_Operation();
int num_outputs = -1;
c_api.TF_ImportGraphDefResultsReturnOperations(_handle, ref num_outputs, ref return_output_handle);
TF_Operation[] return_outputs = new TF_Operation[num_outputs];
unsafe
{
var tf_output_ptr = (TF_Operation*)return_output_handle;
for (int i = 0; i < num_outputs; i++)
return_outputs[i] = *(tf_output_ptr + i);
return return_outputs;
}*/
}
}

public static implicit operator TF_ImportGraphDefResults(IntPtr handle)
=> new TF_ImportGraphDefResults(handle);

public static implicit operator IntPtr(TF_ImportGraphDefResults results)
=> results._handle;

protected override void DisposeUnmanagedResources(IntPtr handle)
=> c_api.TF_DeleteImportGraphDefResults(handle);
}
}

+ 7
- 0
src/TensorFlowNET.Core/Interfaces/IFromMergeVars.cs View File

@@ -0,0 +1,7 @@
namespace Tensorflow
{
    /// <summary>
    /// Implemented by while-loop item types that can populate themselves from a
    /// flattened array of merge variables (tensors / tensor arrays) produced
    /// while building a while-loop.
    /// </summary>
    public interface IFromMergeVars<T>
    {
        // Populates this instance from the flattened merge variables and returns it.
        T FromMergeVars(ITensorOrTensorArray[] mergeVars);
    }
}

+ 2
- 1
src/TensorFlowNET.Core/Keras/Layers/Dense.cs View File

@@ -35,10 +35,11 @@ namespace Tensorflow.Keras.Layers

public Dense(int units,
IActivation activation,
string name = null,
bool use_bias = true,
bool trainable = false,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null) : base(trainable: trainable)
IInitializer bias_initializer = null) : base(trainable: trainable, name: name)
{
this.units = units;
this.activation = activation;


+ 10
- 3
src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs View File

@@ -36,7 +36,7 @@ namespace Tensorflow.Keras.Optimizers

public Tensor __call__(RefVariable step)
{
tf_with(ops.name_scope(name ?? "PolynomialDecay"), scope =>
return tf_with(ops.name_scope(name ?? "PolynomialDecay"), scope =>
{
name = scope;
var initial_learning_rate_tensor = ops.convert_to_tensor(initial_learning_rate, name: "initial_learning_rate");
@@ -53,10 +53,17 @@ namespace Tensorflow.Keras.Optimizers
}
else
{

// Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps);
}

var p = tf.divide(global_step_recomp, decay_steps_recomp);
var pow = tf.pow(1 - p, power_tensor);
var m = math_ops.multiply(initial_learning_rate_tensor - end_learning_rate_tensor, pow);
return math_ops.add(m,
end_learning_rate_tensor,
name: name);
});
throw new NotImplementedException("");
}
}
}

+ 4
- 2
src/TensorFlowNET.Core/Layers/Layer.cs View File

@@ -65,7 +65,9 @@ namespace Tensorflow.Layers
variable_scope scope_context_manager = null;
if (built)
{

scope_context_manager = tf.variable_scope(_scope,
reuse: true,
auxiliary_name_scope: false);
}
else
{
@@ -181,7 +183,7 @@ namespace Tensorflow.Layers
return _current_scope.original_name_scope;
}

private void _set_scope(VariableScope scope = null)
protected void _set_scope(VariableScope scope = null)
{
if (_scope == null)
{


+ 5
- 7
src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs View File

@@ -118,7 +118,7 @@ namespace Tensorflow.Operations
Func<LoopVar<TItem>, LoopVar<TItem>> body,
LoopVar<TItem> loop_vars,
TensorShape[] shape_invariants,
bool return_same_structure)
bool return_same_structure) where TItem : IFromMergeVars<TItem>, new()
{
// Keep original_loop_vars to identify which are TensorArrays
var original_loop_vars = loop_vars;
@@ -178,7 +178,7 @@ namespace Tensorflow.Operations
Func<LoopVar<TItem>, LoopVar<TItem>> body,
LoopVar<TItem> original_loop_vars,
Tensor[] loop_vars,
TensorShape[] shape_invariants)
TensorShape[] shape_invariants) where TItem : IFromMergeVars<TItem>, new()
{
var flat_loop_vars = nest.flatten2(original_loop_vars)
.Select(x => (ITensorOrTensorArray)x)
@@ -235,11 +235,9 @@ namespace Tensorflow.Operations

// Build the graph for pred.
var merge_vars_with_tensor_arrays = _convert_flows_to_tensorarrays(flat_loop_vars, merge_vars);
//var packed_vars = nest.pack_sequence_as(original_loop_vars, merge_vars_with_tensor_arrays, expand_composites: true);
var packed_vars = new LoopVar<TItem>((Tensor)merge_vars_with_tensor_arrays[0],
(TItem)(object)new BodyItemInRnnWhileLoop((Tensor)merge_vars_with_tensor_arrays[1],
new[] { (TensorArray)merge_vars_with_tensor_arrays[2] },
(Tensor)merge_vars_with_tensor_arrays[3]));
var packed_vars = new LoopVar<TItem>(
(Tensor) merge_vars_with_tensor_arrays[0],
new TItem().FromMergeVars(merge_vars_with_tensor_arrays));
var pp = pred(packed_vars);
var c = ops.convert_to_tensor(pp);
_pivot = gen_control_flow_ops.loop_cond(c, name: "LoopCond");


+ 2
- 0
src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs View File

@@ -20,9 +20,11 @@ namespace Tensorflow.Operations.Initializers
{
public GlorotUniform(float scale = 1.0f,
string mode = "FAN_AVG",
bool uniform = true,
int? seed = null,
TF_DataType dtype = TF_DataType.TF_FLOAT) : base(factor: scale,
mode: mode,
uniform: uniform,
seed: seed,
dtype: dtype)
{


+ 172
- 0
src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs View File

@@ -0,0 +1,172 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using static Tensorflow.Binding;
using Tensorflow.Operations.Activation;
using Tensorflow.Keras.Engine;
using Tensorflow.Operations;

namespace Tensorflow
{
    /// <summary>
    /// Basic LSTM recurrent network cell.
    /// The implementation is based on: http://arxiv.org/abs/1409.2329.
    /// </summary>
    public class BasicLstmCell : LayerRnnCell
    {
        int _num_units;             // number of units in the LSTM cell
        float _forget_bias;         // bias added to the forget gate before the sigmoid
        bool _state_is_tuple;       // true => state is an LSTMStateTuple (c, h)
        IActivation _activation;    // cell activation; defaults to tanh
        LSTMStateTuple _state;      // state captured by __call__ for use inside call()
        VariableV1 _kernel;         // weight matrix, shape [input_depth + num_units, 4 * num_units]
        VariableV1 _bias;           // bias vector, shape [4 * num_units]
        string _WEIGHTS_VARIABLE_NAME = "kernel";
        string _BIAS_VARIABLE_NAME = "bias";

        /// <summary>
        /// Initialize the basic LSTM cell.
        /// </summary>
        /// <param name="num_units">The number of units in the LSTM cell.</param>
        /// <param name="forget_bias">Bias added to the forget gate (reduces forgetting early in training).</param>
        /// <param name="state_is_tuple">If true, state is an (c, h) LSTMStateTuple; the concatenated form is not implemented.</param>
        /// <param name="activation">Inner activation; tanh when null.</param>
        /// <param name="reuse">Whether to reuse variables in an existing scope.</param>
        /// <param name="name">Layer name.</param>
        /// <param name="dtype">Data type of the cell's variables.</param>
        public BasicLstmCell(int num_units, float forget_bias = 1.0f, bool state_is_tuple = true,
            IActivation activation = null, bool? reuse = null, string name = null,
            TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: reuse, name: name, dtype: dtype)
        {
            // Inputs must be rank-2: (batch, features).
            input_spec = new InputSpec(ndim: 2);
            _num_units = num_units;
            _forget_bias = forget_bias;
            _state_is_tuple = state_is_tuple;
            _activation = activation;
            if (_activation == null)
                _activation = tf.nn.tanh();
        }

        // Creates the kernel and bias; all four gates share one fused kernel,
        // hence the 4 * num_units output dimension.
        protected override void build(TensorShape input_shape)
        {
            var input_depth = input_shape.dims.Last();
            var h_depth = _num_units;
            _kernel = add_weight(_WEIGHTS_VARIABLE_NAME,
                shape: new[] { input_depth + h_depth, 4 * _num_units });
            _bias = add_weight(_BIAS_VARIABLE_NAME,
                shape: new[] { 4 * _num_units },
                initializer: tf.zeros_initializer);
            built = true;
        }

        // Stashes the (c, h) state, then defers to the base layer call pipeline,
        // which eventually invokes call() below.
        public Tensor[] __call__(Tensor inputs, LSTMStateTuple state)
        {
            _state = state;
            return base.__call__(inputs);
        }

        /// <summary>
        /// Long short-term memory cell (LSTM).
        /// </summary>
        /// <param name="inputs">2-D input tensor (batch, features)</param>
        /// <param name="training">unused here</param>
        /// <param name="state">unused; the state comes from the __call__ overload above</param>
        /// <returns>[new_c, new_h] when state_is_tuple, otherwise their concatenation</returns>
        protected override Tensor[] call(Tensor inputs, Tensor training = null, Tensor state = null)
        {
            var one = constant_op.constant(1, dtype: dtypes.int32);
            // Parameters of gates are concatenated into one multiply for efficiency.
            Tensor c = null;
            Tensor h = null;
            if(_state_is_tuple)
                (c, h) = ((Tensor)_state.c, (Tensor)_state.h);
            else
            {
                // array_ops.split(value: state, num_or_size_splits: 2, axis: one);
                throw new NotImplementedException("BasicLstmCell call");
            }
            // One fused matmul over [inputs, h] produces all four gate pre-activations.
            var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs, h }, 1), _kernel as RefVariable);
            gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable);

            // i = input_gate, j = new_input, f = forget_gate, o = output_gate
            var tensors = array_ops.split(value: gate_inputs, num_or_size_splits: 4, axis: one);
            var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]);

            var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype);
            // Note that using `add` and `multiply` instead of `+` and `*` gives a
            // performance improvement. So using those at the cost of readability.
            var new_c = gen_math_ops.add(
                math_ops.multiply(c, math_ops.sigmoid(gen_math_ops.add(f, forget_bias_tensor))),
                math_ops.multiply(math_ops.sigmoid(i), _activation.Activate(j)));

            var new_h = math_ops.multiply(_activation.Activate(new_c), math_ops.sigmoid(o));


            if (_state_is_tuple)
                return new[] { new_c, new_h };
            else
                return new[] { array_ops.concat(new[] { new_c, new_h }, 1) };
        }

        // Returns a zero-filled initial state; deriving the state from inputs
        // is not implemented.
        public override object get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid)
        {
            if (inputs != null)
                throw new NotImplementedException("get_initial_state input is not null");

            return zero_state(batch_size, dtype);
        }

        /// <summary>
        /// Return zero-filled state tensor(s).
        /// </summary>
        /// <param name="batch_size">scalar tensor holding the batch size</param>
        /// <param name="dtype">dtype of the zero tensors</param>
        /// <returns>an LSTMStateTuple of zero tensors</returns>
        private LSTMStateTuple zero_state(Tensor batch_size, TF_DataType dtype)
        {
            LSTMStateTuple output = null;
            tf_with(ops.name_scope($"{GetType().Name}ZeroState", values: new { batch_size }), delegate
            {
                output = _zero_state_tensors(state_size, batch_size, dtype);
            });

            return output;
        }

        // Builds one zero tensor per element of the state-size tuple; only the
        // LSTMStateTuple form of state_size is supported.
        private LSTMStateTuple _zero_state_tensors(object state_size, Tensor batch_size, TF_DataType dtype)
        {
            if (state_size is LSTMStateTuple state_size_tuple)
            {
                var outputs = state_size_tuple.Flatten()
                    .Select(x => (int)x)
                    .Select(s =>
                    {
                        // Dynamic shape (batch_size, s) for the zeros op...
                        var c = rnn_cell_impl._concat(batch_size, s);
                        var size = array_ops.zeros(c, dtype: dtype);

                        // ...and the matching static shape for shape inference.
                        var c_static = rnn_cell_impl._concat(batch_size, s, @static: true);
                        size.set_shape(c_static);

                        return size;
                    }).ToArray();

                return new LSTMStateTuple(outputs[0], outputs[1]);
            }

            throw new NotImplementedException("_zero_state_tensors");
        }

        // Size of the cell state: an (num_units, num_units) tuple, or the
        // concatenated 2 * num_units when state_is_tuple is false.
        public override object state_size
        {
            get
            {
                if (_state_is_tuple)
                    return new LSTMStateTuple(_num_units, _num_units);
                else
                    return 2 * _num_units;
            }
        }
    }
}

src/TensorFlowNET.Core/Operations/BasicRNNCell.cs → src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs View File

@@ -16,6 +16,7 @@

using System;
using Tensorflow.Keras.Engine;
using Tensorflow.Operations;
using static Tensorflow.Binding;

namespace Tensorflow
@@ -25,7 +26,7 @@ namespace Tensorflow
int _num_units;
Func<Tensor, string, Tensor> _activation;

public override int state_size => _num_units;
public override object state_size => _num_units;
public override int output_size => _num_units;
public VariableV1 _kernel;
string _WEIGHTS_VARIABLE_NAME = "kernel";

+ 13
- 1
src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs View File

@@ -4,7 +4,7 @@ using System.Text;

namespace Tensorflow.Operations
{
internal class BodyItemInRnnWhileLoop : ICanBeFlattened, IPackable<BodyItemInRnnWhileLoop>
internal class BodyItemInRnnWhileLoop : ICanBeFlattened, IPackable<BodyItemInRnnWhileLoop>, IFromMergeVars<BodyItemInRnnWhileLoop>
{
/// <summary>
/// int32 scalar Tensor.
@@ -19,6 +19,10 @@ namespace Tensorflow.Operations
/// </summary>
public Tensor state { get; set; }

public BodyItemInRnnWhileLoop()
{
}

public BodyItemInRnnWhileLoop(Tensor time, TensorArray[] output_ta_t, Tensor state)
{
this.time = time;
@@ -45,5 +49,13 @@ namespace Tensorflow.Operations

return new BodyItemInRnnWhileLoop(time, output_ta_t, state);
}

// Rebuilds this item's fields from the flattened merge variables.
// Index 0 is consumed by LoopVar's leading counter tensor (see the caller in
// WhileContext.BuildLoop); indices 1..3 map to time / output_ta_t / state —
// TODO(review): confirm this layout stays in sync with ICanBeFlattened.Flatten().
public BodyItemInRnnWhileLoop FromMergeVars(ITensorOrTensorArray[] mergeVars)
{
    time = (Tensor) mergeVars[1];
    output_ta_t = new[] {(TensorArray) mergeVars[2]};
    state = (Tensor)mergeVars[3];
    return this;
}
}
}

+ 35
- 0
src/TensorFlowNET.Core/Operations/NnOps/LSTMStateTuple.cs View File

@@ -0,0 +1,35 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Operations
{
    /// <summary>
    /// Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
    ///
    /// Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state
    /// and `h` is the output.
    ///
    /// Only used when `state_is_tuple=True`.
    /// </summary>
    public class LSTMStateTuple : ICanBeFlattened
    {
        // Hidden state: an int (unit count) in the size-describing form,
        // a Tensor in the value-holding form.
        public object c;
        // Output: same int/Tensor duality as `c`.
        public object h;

        // Size-describing form (e.g. state_size): both elements are unit counts.
        public LSTMStateTuple(int c, int h)
        {
            this.c = c;
            this.h = h;
        }

        // Value-holding form: both elements are state tensors.
        public LSTMStateTuple(Tensor c, Tensor h)
        {
            this.c = c;
            this.h = h;
        }

        // Flattens to [c, h] for nest-style structure handling.
        public object[] Flatten()
            => new[] { c, h };
    }
}

src/TensorFlowNET.Core/Operations/LayerRNNCell.cs → src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs View File


src/TensorFlowNET.Core/Operations/RNNCell.cs → src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs View File

@@ -49,7 +49,7 @@ namespace Tensorflow
/// difference between TF and Keras RNN cell.
/// </summary>
protected bool _is_tf_rnn_cell = false;
public virtual int state_size { get; }
public virtual object state_size { get; }

public virtual int output_size { get; }

@@ -64,7 +64,7 @@ namespace Tensorflow
_is_tf_rnn_cell = true;
}

public virtual Tensor get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid)
public virtual object get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid)
{
if (inputs != null)
throw new NotImplementedException("get_initial_state input is not null");
@@ -78,11 +78,10 @@ namespace Tensorflow
/// <param name="batch_size"></param>
/// <param name="dtype"></param>
/// <returns></returns>
public Tensor zero_state(Tensor batch_size, TF_DataType dtype)
private Tensor zero_state(Tensor batch_size, TF_DataType dtype)
{
Tensor output = null;
var state_size = this.state_size;
tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate
tf_with(ops.name_scope($"{GetType().Name}ZeroState", values: new { batch_size }), delegate
{
output = _zero_state_tensors(state_size, batch_size, dtype);
});
@@ -90,20 +89,25 @@ namespace Tensorflow
return output;
}

private Tensor _zero_state_tensors(int state_size, Tensor batch_size, TF_DataType dtype)
private Tensor _zero_state_tensors(object state_size, Tensor batch_size, TF_DataType dtype)
{
var output = nest.map_structure(s =>
if(state_size is int state_size_int)
{
var c = rnn_cell_impl._concat(batch_size, s);
var size = array_ops.zeros(c, dtype: dtype);
var output = nest.map_structure(s =>
{
var c = rnn_cell_impl._concat(batch_size, s);
var size = array_ops.zeros(c, dtype: dtype);

var c_static = rnn_cell_impl._concat(batch_size, s, @static: true);
size.set_shape(c_static);
var c_static = rnn_cell_impl._concat(batch_size, s, @static: true);
size.set_shape(c_static);

return size;
}, state_size);
return size;
}, state_size_int);

return output;
return output;
}

throw new NotImplementedException("_zero_state_tensors");
}
}
}

+ 149
- 2
src/TensorFlowNET.Core/Operations/NnOps/rnn.cs View File

@@ -18,13 +18,160 @@ using NumSharp;
using System;
using System.Collections.Generic;
using System.Linq;
using Tensorflow.Framework;
using Tensorflow.Util;
using static Tensorflow.Binding;

namespace Tensorflow.Operations
{
internal class rnn
public class rnn
{
/// <summary>
/// Creates a bidirectional recurrent neural network.
/// Runs the forward cell over the inputs and the backward cell over the
/// reversed inputs, then concatenates the two output sequences feature-wise.
/// </summary>
/// <param name="cell_fw">cell used for the forward direction</param>
/// <param name="cell_bw">cell used for the backward direction</param>
/// <param name="inputs">time-major list of input tensors; must be non-empty</param>
/// <param name="initial_state_fw">optional initial state for the forward pass</param>
/// <param name="initial_state_bw">optional initial state for the backward pass</param>
/// <param name="dtype">dtype used when building zero initial states</param>
/// <param name="sequence_length">optional per-example sequence lengths</param>
/// <param name="scope">variable scope name; "bidirectional_rnn" when null</param>
/// <returns>(concatenated outputs, forward final state, backward final state)</returns>
public static (Tensor[], LSTMStateTuple, LSTMStateTuple) static_bidirectional_rnn(BasicLstmCell cell_fw,
    BasicLstmCell cell_bw,
    Tensor[] inputs,
    Tensor initial_state_fw = null,
    Tensor initial_state_bw = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    Tensor sequence_length = null,
    string scope = null)
{
    if (inputs == null || inputs.Length == 0)
        throw new ValueError("inputs must not be empty");

    Tensor[] output_fw = null;
    Tensor[] output_bw = null;
    LSTMStateTuple output_state_fw = null;
    LSTMStateTuple output_state_bw = null;

    tf_with(tf.variable_scope(scope ?? "bidirectional_rnn"), delegate
    {
        // Forward direction
        tf_with(tf.variable_scope("fw"), fw_scope =>
        {
            (output_fw, output_state_fw) = static_rnn(
                cell_fw,
                inputs,
                initial_state_fw,
                dtype,
                sequence_length,
                scope: fw_scope);
        });

        // backward direction: run the cell over the time-reversed inputs
        tf_with(tf.variable_scope("bw"), bw_scope =>
        {
            var reversed_inputs = _reverse_seq(inputs, sequence_length);
            (output_bw, output_state_bw) = static_rnn(
                cell_bw,
                reversed_inputs,
                initial_state_bw,
                dtype,
                sequence_length,
                scope: bw_scope);
        });
    });

    // Flip the backward outputs back into forward time order before concatenating.
    output_bw = _reverse_seq(output_bw, sequence_length);

    // Concatenate fw/bw outputs along the feature axis for each time step.
    var flat_outputs = zip(output_fw, output_bw)
        .Select(x => array_ops.concat(new[] { x.Item1, x.Item2 }, 1))
        .ToArray();

    return (flat_outputs, output_state_fw, output_state_bw);
}

// Reverses a time-major sequence of tensors. Only the lengths == null case
// (reverse the entire sequence) is implemented.
private static Tensor[] _reverse_seq(Tensor[] input_seq, Tensor lengths)
{
    if (lengths != null)
        throw new NotImplementedException("_reverse_seq");

    var reversed = input_seq.Reverse().ToArray();
    return reversed;
}

/// <summary>
/// Unrolls <paramref name="cell"/> over the time-major <paramref name="inputs"/>,
/// returning the per-step outputs and the state tuple.
/// Only the explicit-scope path with an LSTMStateTuple state is implemented.
/// </summary>
/// <param name="cell">the LSTM cell to unroll</param>
/// <param name="inputs">time-major list of input tensors</param>
/// <param name="initial_state">optional initial state; zero state when null</param>
/// <param name="dtype">dtype used when building the zero state</param>
/// <param name="sequence_length">not supported; must be null</param>
/// <param name="scope">variable scope; the null-scope path is not implemented</param>
public static (Tensor[], LSTMStateTuple) static_rnn(BasicLstmCell cell,
    Tensor[] inputs,
    Tensor initial_state,
    TF_DataType dtype = TF_DataType.DtInvalid,
    Tensor sequence_length = null,
    VariableScope scope = null)
{
    List<Tensor> outputs = new List<Tensor>();
    object state = null;

    // Create a new scope in which the caching device is either
    // determined by the parent scope, or is set to place the cached
    // Variable using the same placement as for the rest of the RNN.
    if (scope == null)
        tf_with(tf.variable_scope("rnn"), varscope =>
        {
            throw new NotImplementedException("static_rnn");
        });
    else
        tf_with(tf.variable_scope(scope), scope1 =>
        {
            Dimension fixed_batch_size = null;
            Dimension batch_size = null;
            Tensor batch_size_tensor = null;
            VariableScope varscope = scope1;
            // Obtain the first sequence of the input
            var first_input = inputs[0];
            if (first_input.TensorShape.rank != 1)
            {
                // Inputs are (batch, features): check every input's static shape
                // agrees on the batch dimension and has a known feature size.
                var input_shape = first_input.TensorShape.with_rank_at_least(2);
                fixed_batch_size = input_shape.dims[0];
                var flat_inputs = nest.flatten2(inputs);
                foreach (var flat_input in flat_inputs)
                {
                    input_shape = flat_input.TensorShape.with_rank_at_least(2);
                    batch_size = tensor_shape.dimension_at_index(input_shape, 0);
                    var input_size = input_shape[1];
                    fixed_batch_size.merge_with(batch_size);
                    foreach (var (i, size) in enumerate(input_size.dims))
                    {
                        if (size < 0)
                            throw new ValueError($"Input size (dimension {i} of inputs) must be accessible via " +
                                "shape inference, but saw value None.");
                    }
                }
            }
            else
                fixed_batch_size = first_input.TensorShape.with_rank_at_least(1).dims[0];

            // Prefer the statically-known batch size; fall back to a runtime
            // shape lookup when it is unknown.
            if (tensor_shape.dimension_value(fixed_batch_size) >= 0)
                batch_size = tensor_shape.dimension_value(fixed_batch_size);
            else
                batch_size_tensor = array_ops.shape(first_input)[0];

            if (initial_state != null)
                state = initial_state;
            else
            {
                state = cell.get_initial_state(batch_size: batch_size_tensor, dtype: dtype);
            }

            Tensor output = null;
            if (state is LSTMStateTuple state_tuple)
            {
                foreach (var (time, input_) in enumerate(inputs))
                {
                    // Reuse the cell's variables for every step after the first.
                    if (time > 0)
                        varscope.reuse_variables();
                    if (sequence_length != null)
                        throw new NotImplementedException("static_rnn");

                    // BasicLstmCell.call returns [new_c, new_h]; the step output is new_h.
                    var results = cell.__call__(input_, state_tuple);
                    (output, state_tuple) = (results[1], new LSTMStateTuple(results[0], results[1]));
                    outputs.Add(output);
                }
                // NOTE(review): the updated state_tuple is never written back to
                // `state`, so the returned tuple appears to be the initial state,
                // not the final one — verify intended.
            }
        });

    return (outputs.ToArray(), state as LSTMStateTuple);
}

public static (Tensor, Tensor) dynamic_rnn(RnnCell cell, Tensor inputs_tensor,
Tensor sequence_length = null, Tensor initial_state = null,
TF_DataType dtype = TF_DataType.DtInvalid,
@@ -52,7 +199,7 @@ namespace Tensorflow.Operations
if (initial_state != null)
state = initial_state;
else
state = cell.get_initial_state(batch_size: batch_size, dtype: dtype);
state = cell.get_initial_state(batch_size: batch_size, dtype: dtype) as Tensor;

var inputs = nest.pack_sequence_as(structure: inputs_tensor, flat_sequence: flat_input);



+ 0
- 4
src/TensorFlowNET.Core/Operations/Operation.Control.cs View File

@@ -52,10 +52,6 @@ namespace Tensorflow
public void _set_control_flow_context(ControlFlowContext ctx)
{
if (name.Contains("gradients/rnn/while/basic_rnn_cell/Tanh_grad/TanhGrad/f_acc"))
{
}
_control_flow_context = ctx;
}


+ 25
- 0
src/TensorFlowNET.Core/Operations/array_ops.cs View File

@@ -228,6 +228,16 @@ namespace Tensorflow
public static Tensor rank(Tensor input, string name = null)
=> rank_internal(input, name, optimize: true);

/// <summary>
/// Returns the rank of the tensor formed by packing <paramref name="inputs"/>,
/// emitted as a constant since it is known at graph-construction time.
/// </summary>
public static Tensor rank(Tensor[] inputs, string name = null)
    => tf_with(ops.name_scope(name, "Rank", new { inputs }), scope =>
    {
        name = scope;
        var packed = ops.convert_to_tensor(inputs);
        return constant_op.constant(packed.NDims, dtype: tf.int32, name: name);
    });

public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true)
{
return tf_with(ops.name_scope(name, "Rank", new List<Tensor> { input }), scope =>
@@ -594,6 +604,11 @@ namespace Tensorflow
return gen_array_ops.concat_v2(values, axis, name: name);
}

/// <summary>
/// Concatenates <paramref name="values"/> along an axis supplied as a tensor.
/// </summary>
public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat")
    => gen_array_ops.concat_v2(values, axis, name: name);

public static Tensor concat(object[] values, int axis, string name = "concat")
{
return gen_array_ops.concat_v2(values, axis, name: name);
@@ -619,6 +634,16 @@ namespace Tensorflow
});
}

/// <summary>
/// Splits <paramref name="value"/> into <paramref name="num_or_size_splits"/>
/// equal pieces along the dimension given by <paramref name="axis"/>.
/// </summary>
/// <param name="value">tensor to split</param>
/// <param name="num_or_size_splits">number of equal splits</param>
/// <param name="axis">dimension along which to split, as a scalar tensor</param>
/// <param name="name">operation name</param>
/// <returns>the resulting sub-tensors</returns>
public static Tensor[] split(Tensor value, int num_or_size_splits, Tensor axis,
    string name = "split")
{
    // Only the equal-split form is supported here; the original's converted
    // size_splits tensor was dead code (never passed to gen_array_ops.split)
    // and has been removed.
    return gen_array_ops.split(axis: axis,
        num_split: num_or_size_splits,
        value: value,
        name: name);
}

public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null)
=> gen_array_ops.slice(input, begin, size, name: name);



+ 45
- 0
src/TensorFlowNET.Core/Operations/clip_ops.cs View File

@@ -0,0 +1,45 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using static Tensorflow.Binding;

namespace Tensorflow
{
    public class clip_ops
    {
        /// <summary>
        /// Clips tensor values elementwise to the range
        /// [<paramref name="clip_value_min"/>, <paramref name="clip_value_max"/>]:
        /// first capped from above with minimum(), then floored with maximum().
        /// </summary>
        /// <param name="t">tensor to clip</param>
        /// <param name="clip_value_min">lower bound (scalar or broadcastable)</param>
        /// <param name="clip_value_max">upper bound (scalar or broadcastable)</param>
        /// <param name="name">operation name</param>
        /// <returns>the clipped tensor</returns>
        public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
        {
            return tf_with(ops.name_scope(name, "clip_by_value", new { t, clip_value_min, clip_value_max }), delegate
            {
                var values = ops.convert_to_tensor(t, name: "t");
                // Go through list of tensors, for each value in each tensor clip
                var t_min = math_ops.minimum(values, clip_value_max);
                // Assert that the shape is compatible with the initial shape,
                // to prevent unintentional broadcasting.
                _ = values.TensorShape.merge_with(t_min.shape);
                var t_max = math_ops.maximum(t_min, clip_value_min, name: name);
                _ = values.TensorShape.merge_with(t_max.shape);

                return t_max;
            });
        }
    }
}

+ 1
- 1
src/TensorFlowNET.Core/Operations/control_flow_ops.cs View File

@@ -625,7 +625,7 @@ namespace Tensorflow
bool swap_memory = false,
string name = null,
Tensor maximum_iterations = null,
bool return_same_structure = false)
bool return_same_structure = false) where TItem : IFromMergeVars<TItem>, new()
{
return tf_with(ops.name_scope(name, "while", loop_vars), scope =>
{


+ 67
- 0
src/TensorFlowNET.Core/Operations/ctc_ops.cs View File

@@ -0,0 +1,67 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Linq;
using Tensorflow.Operations;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class ctc_ops
{
    /// <summary>
    /// Performs greedy decoding on the logits given in inputs.
    /// </summary>
    /// <param name="inputs">
    /// 3-D logits, shape: <c>(max_time x batch_size x num_classes)</c>.
    /// </param>
    /// <param name="sequence_length">
    /// A vector of sequence lengths, size <c>(batch_size)</c>.
    /// </param>
    /// <param name="merge_repeated">
    /// If true, merge repeated classes in the output.
    /// </param>
    /// <param name="name">
    /// If specified, the created operation in the graph will carry this name;
    /// otherwise it will be named 'CTCGreedyDecoder'.
    /// </param>
    /// <returns>
    /// A tuple with multiple values, as follows:
    /// decoded_indices : Indices matrix, size <c>(total_decoded_outputs x 2)</c>,
    /// of a <c>SparseTensor&lt;int64, 2&gt;</c>. The rows store: [batch, time].
    /// decoded_values : Values vector, size <c>(total_decoded_outputs)</c>,
    /// of a <c>SparseTensor&lt;int64, 2&gt;</c>. The vector stores the decoded classes.
    /// decoded_shape : Shape vector, size <c>(2)</c>, of the decoded SparseTensor.
    /// Values are: <c>[batch_size, max_decoded_length]</c>.
    /// log_probability : Matrix, size <c>(batch_size x 1)</c>, containing sequence
    /// log-probabilities.
    /// The Operation can be fetched from any of the Tensors returned in the tuple,
    /// via the Operation property.
    /// </returns>
    /// <remarks>
    /// A note about merge_repeated: if enabled, when consecutive logits' maximum
    /// indices are the same, only the first of these is emitted. Labeling the
    /// blank '*', the sequence "A B B * B B" becomes "A B B" if
    /// merge_repeated = true and "A B B B B" if merge_repeated = false.
    ///
    /// Regardless of merge_repeated, if the maximum index at a given time and
    /// batch corresponds to the blank, index <c>(num_classes - 1)</c>, no new
    /// element is emitted.
    /// </remarks>
    public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null)
    {
        // Thin wrapper over the generated op binding.
        return gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated: merge_repeated, name: name);
    }
}
}

+ 238
- 0
src/TensorFlowNET.Core/Operations/functional_ops.cs View File

@@ -0,0 +1,238 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Linq;
using NumSharp;
using Tensorflow.Framework;
using Tensorflow.Util;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class functional_ops
{
    /// <summary>
    /// Applies <paramref name="fn"/> repeatedly over the elements unpacked from
    /// <paramref name="elems"/> along dimension 0, threading an accumulator
    /// (the C# port of TF Python's tf.scan).
    /// </summary>
    /// <param name="fn">Callable taking (accumulator, element) and returning the next accumulator.</param>
    /// <param name="elems">Tensor unpacked along dimension 0 to produce the scanned elements.</param>
    /// <param name="initializer">Initial accumulator; when null, the first element is used and scanning starts at index 1.</param>
    /// <param name="parallel_iterations">Number of iterations allowed to run in parallel in the while_loop.</param>
    /// <param name="back_prop">Enables support for back propagation through the loop.</param>
    /// <param name="swap_memory">Enables GPU-CPU memory swapping inside the while_loop.</param>
    /// <param name="infer_shape">If true, accumulator TensorArrays check element-shape consistency.</param>
    /// <param name="reverse">If true, scans from the last element towards the first.</param>
    /// <param name="name">Optional name scope for the created ops.</param>
    /// <returns>Tensor of all intermediate accumulator values, stacked along dimension 0.</returns>
    public static Tensor scan(
        Func<Tensor, Tensor, Tensor> fn,
        Tensor elems,
        Tensor initializer = null,
        int parallel_iterations = 10,
        bool back_prop = true,
        bool swap_memory = false,
        bool infer_shape = true,
        bool reverse = false,
        string name = null)
    {
        bool input_is_sequence = nest.is_sequence(elems);

        // Local helpers to move between a (possibly nested) structure and a flat Tensor[].
        Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new [] {x};
        Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0];

        bool output_is_sequence;
        Func<Tensor, Tensor[]> output_flatten;
        Func<Tensor[], Tensor> output_pack;
        if (initializer == null)
        {
            // No initializer: the output structure mirrors the input structure.
            output_is_sequence = input_is_sequence;
            output_flatten = input_flatten;
            output_pack = input_pack;
        }
        else
        {
            // With an initializer, the output structure mirrors the initializer.
            output_is_sequence = nest.is_sequence(initializer);
            output_flatten = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new [] {x};
            output_pack = (x) => output_is_sequence ? (Tensor)nest.pack_sequence_as(initializer, x) : x[0];
        }

        var elems_flat = input_flatten(elems);

        // NOTE(review): Python computes `in_graph_mode = not context.executing_eagerly()`;
        // here the negation is missing, so the name looks inverted — harmless today
        // because the guarded block below is fully commented out, but confirm before
        // re-enabling the caching_device logic.
        bool in_graph_mode = tf.context.executing_eagerly();

        return tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope =>
        {
            if (in_graph_mode)
            {
                // todo tf.net doesn't expose .caching_device
                //// Any get_variable calls in fn will cache the first call locally
                //// and not issue repeated network I/O requests for each iteration.
                //var varscope = variable_scope.get_variable_scope();
                //bool varscope_caching_device_was_none = false;
                //if (varscope.caching_device = null)
                //{
                //    // varscope.set_caching_device(lambda op: op.device)
                //    // varscope_caching_device_was_none = True
                //}
            }

            elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem")).ToArray();

            // n is the static length of the scan dimension (dimension 0).
            var n = tensor_shape.dimension_value(elems_flat[0].shape[0]);

            // todo python had the below but dimension_value returns int which can't be null
            //if (n == null)
            //{
            //    n = array_ops.shape(elems_flat[0])[0];
            //}

            // One TensorArray per flat input; element_shape drops dimension 0
            // since each slot holds a single slice of the input.
            var elems_ta = elems_flat.Select(elem => new TensorArray(
                elem.dtype,
                size: tf.constant(n),
                dynamic_size: false,
                element_shape: elem.shape.Skip(1).ToArray(),
                infer_shape: true)).ToList();

            // Unpack each flat input tensor into its TensorArray along dimension 0.
            for (int index = 0; index < elems_ta.Count; index++)
            {
                elems_ta[index].unstack(elems_flat[index]);
            }

            Tensor[] a_flat;
            int i;
            if (initializer == null)
            {
                // Seed the accumulator with the first (or last, if reversed)
                // element and start the loop one step in.
                a_flat = elems_ta.Select(elem => elem.read(tf.constant(reverse ? n - 1 : 0))).ToArray();
                i = 1;
            }
            else
            {
                Tensor[] initializer_flat = output_flatten(initializer);
                a_flat = initializer_flat.Select(init => ops.convert_to_tensor(init)).ToArray();
                i = 0;
            }

            // Accumulator TensorArrays collecting every intermediate result.
            var accs_ta = a_flat.Select(init => new TensorArray(
                dtype: init.dtype,
                size: tf.constant(n),
                element_shape: infer_shape ? init.shape : null,
                dynamic_size: false,
                infer_shape: infer_shape)).ToArray();

            if (initializer == null)
            {
                // The seeded first element is itself part of the output.
                for (int index = 0; index < accs_ta.Length; index++)
                {
                    accs_ta[index].write(tf.constant(reverse ? n - 1 : 0), a_flat[index]);
                }
            }

            // Loop body: read element i, apply fn to (accumulator, element),
            // record the result, and advance i in the scan direction.
            BodyItem compute(BodyItem item)
            {
                var packed_elems = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray());
                var packed_a = output_pack(item.A_Flat);
                var a_out = fn(packed_a, packed_elems);

                var flat_a_out = output_flatten(a_out);
                for (int j = 0; j < item.Accs_ta.Length; j++)
                {
                    item.Accs_ta[j].write(item.I, flat_a_out[j]);
                }

                var next_i = reverse ? item.I - 1 : item.I + 1;
                return new BodyItem(next_i, flat_a_out, item.Accs_ta);
            }

            int initial_i;
            Func<BodyItem, Tensor> condition;
            if (reverse)
            {
                // When seeded from the last element, i == 1 so we start at n - 2.
                initial_i = n - 1 - i;
                condition = x => x.I >= 0;
            }
            else
            {
                initial_i = i;
                condition = x => x.I < n;
            }

            BodyItem bodyItem =
                control_flow_ops.while_loop(
                    condition,
                    compute,
                    new BodyItem(tf.constant(initial_i), a_flat, accs_ta),
                    parallel_iterations: parallel_iterations,
                    back_prop: back_prop,
                    swap_memory: swap_memory,
                    maximum_iterations: tf.constant(n));

            // Stack each accumulator TensorArray back into a single tensor.
            var results_flat = bodyItem.Accs_ta.Select(r => r.stack()).ToArray();

            // Merge the static leading dimension across all flat inputs and
            // stamp it onto the results for better shape inference.
            var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0]));
            foreach (var elem in elems_flat.Skip(1))
            {
                n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0])));
            }

            foreach (Tensor r in results_flat)
            {
                r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray()));
            }

            // todo get working when the above caching_device is fixed
            //if (in_graph_mode && varscope_caching_device_was_none) {
            //    varscope.set_caching_device(None);
            //}

            return output_pack(results_flat);
        });
    }

    // Loop-carried state for the scan while_loop: the current index, the
    // flattened accumulator tensors, and the output TensorArrays.
    internal class BodyItem : ICanBeFlattened, IPackable<BodyItem>, IFromMergeVars<BodyItem>
    {
        // Current scan index (scalar int tensor).
        public Tensor I { get; set; }
        // Flattened accumulator value(s) from the previous iteration.
        public Tensor[] A_Flat { get; set; }
        // TensorArrays accumulating every intermediate result.
        public TensorArray[] Accs_ta { get; set; }

        public BodyItem()
        {
        }

        public BodyItem(Tensor i, Tensor[] a_flat, TensorArray[] accs_ta)
        {
            I = i;
            A_Flat = a_flat;
            Accs_ta = accs_ta;
        }

        // Flattens to [I, ...A_Flat, ...Accs_ta] for the while_loop machinery.
        public object[] Flatten()
        {
            var elements = new List<object> { I };
            elements.AddRange(A_Flat);
            elements.AddRange(Accs_ta);
            return elements.ToArray();
        }

        // NOTE(review): Pack and FromMergeVars hard-code single-element arrays,
        // so they only round-trip correctly when there is exactly one flat
        // accumulator and one TensorArray — confirm multi-accumulator scans
        // are not expected to reach this path.
        public BodyItem Pack(object[] sequences)
        {
            I = sequences[0] as Tensor;
            A_Flat = new [] { sequences[1] as Tensor };
            Accs_ta = new [] { sequences[2] as TensorArray };
            return new BodyItem(I, A_Flat, Accs_ta);
        }

        public BodyItem FromMergeVars(ITensorOrTensorArray[] merge_vars)
        {
            // merge_vars[0] is skipped; the merge outputs start at index 1.
            I = (Tensor)merge_vars[1];
            A_Flat = new [] {(Tensor) merge_vars[2]};
            Accs_ta = new [] {(TensorArray) merge_vars[3]};
            return this;
        }
    }
}
}


+ 1
- 1
src/TensorFlowNET.Core/Operations/gen_array_ops.cs View File

@@ -47,7 +47,7 @@ namespace Tensorflow
/// <param name="axis"></param>
/// <param name="name"></param>
/// <returns></returns>
public static Tensor concat_v2<T>(T[] values, int axis, string name = null)
public static Tensor concat_v2<T, Ta>(T[] values, Ta axis, string name = null)
{
var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });



+ 38
- 0
src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs View File

@@ -0,0 +1,38 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

namespace Tensorflow
{
public class gen_ctc_ops
{
    public static OpDefLibrary _op_def_lib = new OpDefLibrary();

    /// <summary>
    /// Builds the "CTCGreedyDecoder" graph op and returns all of its outputs,
    /// in order: decoded_indices, decoded_values, decoded_shape, log_probability.
    /// </summary>
    public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder")
    {
        var _op = _op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new
        {
            inputs,
            sequence_length,
            merge_repeated
        });
        return _op.outputs;
    }
}
}

+ 13
- 0
src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs View File

@@ -27,6 +27,19 @@ namespace Tensorflow
return _op.output;
}

public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions,
string name = null)
{
var _op = _op_def_lib._apply_op_helper("DynamicPartition", name, new
{
data,
partitions,
num_partitions
});

return _op.outputs;
}

public static (Tensor, Tensor) tensor_array_v3<T>(T size, TF_DataType dtype = TF_DataType.DtInvalid,
TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true,
bool identical_element_shapes = false, string tensor_array_name = "", string name = null)


src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs → src/TensorFlowNET.Core/Operations/gen_image_ops.cs View File


src/TensorFlowNET.Core/Operations/gen_io_ops.py.cs → src/TensorFlowNET.Core/Operations/gen_io_ops.cs View File

@@ -34,7 +34,7 @@ namespace Tensorflow
return _op.outputs;
}

public static Tensor read_file(string filename, string name = null)
public static Tensor read_file<T>(T filename, string name = null)
{
var _op = _op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename });


+ 110
- 10
src/TensorFlowNET.Core/Operations/map_fn.cs View File

@@ -2,7 +2,10 @@
using System.Collections.Generic;
using System.Linq;
using System.Text;
using NumSharp;
using Tensorflow.Framework;
using Tensorflow.Operations;
using Tensorflow.Util;
using static Tensorflow.Binding;

namespace Tensorflow
@@ -30,10 +33,40 @@ namespace Tensorflow
bool infer_shape = true,
string name = null)
{
var elems_flat = new[] { elems };
tf_with(ops.name_scope(name, "map", elems_flat), delegate
bool input_is_sequence = nest.is_sequence(elems);
Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new [] {x};
Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0];

bool output_is_sequence;
Func<Tensor, Tensor[]> output_flatten;
Func<Tensor[], Tensor> output_pack;
if (dtype == TF_DataType.DtInvalid)
{
output_is_sequence = input_is_sequence;
output_flatten = input_flatten;
output_pack = input_pack;
}
else
{
output_is_sequence = nest.is_sequence(dtype);
output_flatten = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new [] {x};
output_pack = (x) => output_is_sequence ? (Tensor)nest.pack_sequence_as(dtype, x) : x[0];
}

var elems_flat = input_flatten(elems);
return tf_with(ops.name_scope(name, "map", elems_flat), delegate
{
var varscope = tf.get_variable_scope();
//if in_graph_mode:
//# Any get_variable calls in fn will cache the first call locally
//# and not issue repeated network I/O requests for each iteration.
//varscope = vs.get_variable_scope()
//varscope_caching_device_was_none = False
//if varscope.caching_device is None:
// # TODO(ebrevdo): Change to using colocate_with here and in other
// # methods.
// varscope.set_caching_device(lambda op: op.device)
// varscope_caching_device_was_none = True

elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem"))
.ToArray();

@@ -65,22 +98,89 @@ namespace Tensorflow
dynamic_size: false,
infer_shape: infer_shape)).ToArray();

/*Func<Tensor, TensorArray> compute = (i, tas) =>

BodyItem compute(BodyItem item)
{
throw new NotImplementedException("");
};
var packed_values = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray());
var packed_fn_values = fn(packed_values);
//nest.assert_same_structure(dtype or elems, packed_fn_values)

var flat_fn_values = output_flatten(packed_fn_values);
for (int j = 0; j < item.Accs_ta.Length; j++)
{
item.Accs_ta[j].write(item.I, flat_fn_values[j]);
}

return new BodyItem(item.I + 1, item.Accs_ta);
}

var r_a = control_flow_ops.while_loop(
(i, _) => i < n,
(x) => x.I < n,
compute,
new[] { i, accs_ta },
new BodyItem(i, accs_ta),
parallel_iterations: parallel_iterations,
back_prop: back_prop,
swap_memory: swap_memory,
maximum_iterations: n);*/
maximum_iterations: tf.constant(n));
var results_flat = r_a.Accs_ta.Select(r => r.stack()).ToArray();

var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0]));
foreach (var elem in elems_flat.Skip(1))
{
n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0])));
}

foreach (Tensor r in results_flat)
{
r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray()));
}

// todo get working when the above caching_device is fixed
//if (in_graph_mode && varscope_caching_device_was_none) {
// varscope.set_caching_device(None);
//}

return output_pack(results_flat);
});
}

internal class BodyItem : ICanBeFlattened, IPackable<BodyItem>, IFromMergeVars<BodyItem>
{
public Tensor I { get; set; }
public TensorArray[] Accs_ta { get; set; }

throw new NotImplementedException("");
public BodyItem()
{
}

public BodyItem(Tensor i, TensorArray[] accs_ta)
{
I = i;
Accs_ta = accs_ta;
}

public object[] Flatten()
{
var elements = new List<object> { I };
elements.AddRange(Accs_ta);
return elements.ToArray();
}

public BodyItem Pack(object[] sequences)
{
I = sequences[0] as Tensor;
Accs_ta = new [] { sequences[1] as TensorArray };
return new BodyItem(I, Accs_ta);
}

public BodyItem FromMergeVars(ITensorOrTensorArray[] merge_vars)
{
I = (Tensor)merge_vars[1];
Accs_ta = new [] {(TensorArray) merge_vars[2]};
return this;
}
}
}
}

+ 18
- 4
src/TensorFlowNET.Core/Operations/math_ops.cs View File

@@ -219,10 +219,19 @@ namespace Tensorflow
}
}

public static Tensor reduce_mean(Tensor[] input_tensors, int axis, bool keepdims = false, string name = null)
public static Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null)
{
var m = gen_math_ops.mean(input_tensors, axis, keepdims, name);
return _may_reduce_to_scalar(keepdims, axis, m);
if(axis == null)
{
var r = _ReductionDims(input_tensors, axis);
var m = gen_math_ops.mean(input_tensors, r, keepdims, name);
return _may_reduce_to_scalar(keepdims, axis, m);
}
else
{
var m = gen_math_ops.mean(input_tensors, axis, keepdims, name);
return _may_reduce_to_scalar(keepdims, axis, m);
}
}

/// <summary>
@@ -492,7 +501,7 @@ namespace Tensorflow
return output;
}

private static Tensor _may_reduce_to_scalar(bool keepdims, int axis, Tensor output)
private static Tensor _may_reduce_to_scalar(bool keepdims, int? axis, Tensor output)
{
return output;
}
@@ -515,6 +524,11 @@ namespace Tensorflow
return axis;
}

private static Tensor _ReductionDims(Tensor[] x, int? axis = null, string name = null)
{
return range(0, array_ops.rank(x));
}

private static Tensor _ReductionDims(Tensor x, int[] axis)
{
if (axis != null)


+ 1
- 1
src/TensorFlowNET.Core/Operations/random_ops.cs View File

@@ -80,7 +80,7 @@ namespace Tensorflow
}

public static Tensor random_uniform(Tensor shape,
long minval = 0,
int minval = 0,
Tensor maxval = null,
TF_DataType dtype = TF_DataType.TF_FLOAT,
int? seed = null,


+ 10
- 2
src/TensorFlowNET.Core/Sessions/BaseSession.cs View File

@@ -68,6 +68,14 @@ namespace Tensorflow
return _run(fetche, feed_dict)[0];
}

public virtual (NDArray, NDArray, NDArray, NDArray, NDArray) run(
(ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation) fetches,
params FeedItem[] feed_dict)
{
var results = _run(new object[] { fetches.Item1, fetches.Item2, fetches.Item3, fetches.Item4, fetches.Item5 }, feed_dict);
return (results[0], results[1], results[2], results[3], results[4]);
}

public virtual (NDArray, NDArray, NDArray, NDArray) run((ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation) fetches, params FeedItem[] feed_dict)
{
var results = _run(new object[] {fetches.Item1, fetches.Item2, fetches.Item3, fetches.Item4}, feed_dict);
@@ -271,7 +279,7 @@ namespace Tensorflow
break;
case TF_DataType.TF_STRING:
using (var reader = new CodedInputStream(new IntPtr(srcAddress).Stream(8, (long) tensor.bytesize)))
ret = NDArray.FromString(reader.ReadString());
ret = new NDArray(reader.ReadBytes().ToByteArray());
break;
case TF_DataType.TF_UINT8:
ret = NDArray.Scalar(*(byte*) srcAddress);
@@ -459,4 +467,4 @@ namespace Tensorflow
}
}
}
}
}

+ 1
- 2
src/TensorFlowNET.Core/Sessions/Session.cs View File

@@ -37,8 +37,7 @@ namespace Tensorflow

public Session as_default()
{
tf._defaultSessionFactory.Value = this;
return this;
return ops.set_default_session(this);
}

[MethodImpl(MethodImplOptions.NoOptimization)]


+ 0
- 33
src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs View File

@@ -58,51 +58,18 @@ namespace Tensorflow
case NDArray value:
result = new[] { value };
break;
#if _REGEN
%types=["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%foreach types%
case #1 value:
result = new[] { NDArray.Scalar(value) };
break;
%
#else
case sbyte value:
result = new[] { NDArray.Scalar(value) };
break;
case bool value:
result = new[] { NDArray.Scalar(value) };
break;
case byte value:
result = new[] { NDArray.Scalar(value) };
break;
case short value:
result = new[] { NDArray.Scalar(value) };
break;
case ushort value:
result = new[] { NDArray.Scalar(value) };
break;
case int value:
result = new[] { NDArray.Scalar(value) };
break;
case uint value:
result = new[] { NDArray.Scalar(value) };
break;
case long value:
result = new[] { NDArray.Scalar(value) };
break;
case ulong value:
result = new[] { NDArray.Scalar(value) };
break;
case float value:
result = new[] { NDArray.Scalar(value) };
break;
case double value:
result = new[] { NDArray.Scalar(value) };
break;
case Complex value:
result = new[] { NDArray.Scalar(value) };
break;
#endif
default:
break;
}


+ 3
- 0
src/TensorFlowNET.Core/Sessions/_FetchHandler.cs View File

@@ -86,6 +86,9 @@ namespace Tensorflow
case NPTypeCode.Char:
full_values.Add(float.NaN);
break;
case NPTypeCode.Byte:
full_values.Add(float.NaN);
break;
default:
throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype.Name}");
}


+ 1
- 3
src/TensorFlowNET.Core/Status/Status.cs View File

@@ -65,9 +65,7 @@ namespace Tensorflow
}

public static implicit operator IntPtr(Status status)
{
return status._handle;
}
=> status._handle;

protected override void DisposeUnmanagedResources(IntPtr handle)
=> TF_DeleteStatus(handle);


+ 11
- 11
src/TensorFlowNET.Core/TensorFlow.Binding.csproj View File

@@ -1,11 +1,11 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<TargetFrameworks>net472;netstandard2.0</TargetFrameworks>
<TargetFramework>netstandard2.0</TargetFramework>
<AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>1.14.1</TargetTensorFlow>
<Version>0.12.1</Version>
<Version>0.14.2.0</Version>
<Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
@@ -18,14 +18,13 @@
<Description>Google's TensorFlow full binding in .NET Standard.
Building, training and infering deep learning models.
https://tensorflownet.readthedocs.io</Description>
<AssemblyVersion>0.12.1.0</AssemblyVersion>
<PackageReleaseNotes>Changes since v0.11.0:
1: Add ICanBeFlattened for nest.flatten2.
2: Complete the WhileContext.
3: Add tf.nn.rnn_cell.BasicRNNCell and tf.nn.dynamic_rnn.
4: Add EstimatorSpec.</PackageReleaseNotes>
<AssemblyVersion>0.14.2.0</AssemblyVersion>
<PackageReleaseNotes>Changes since v0.14.0:
1: Add TransformGraphWithStringInputs.
2: tf.trainer.load_graph, tf.trainer.freeze_graph
3: Import Protobuf.Text</PackageReleaseNotes>
<LangVersion>7.3</LangVersion>
<FileVersion>0.12.1.0</FileVersion>
<FileVersion>0.14.2.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile>
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
<SignAssembly>true</SignAssembly>
@@ -62,8 +61,9 @@ https://tensorflownet.readthedocs.io</Description>
</ItemGroup>

<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.10.1" />
<PackageReference Include="NumSharp" Version="0.20.4" />
<PackageReference Include="Google.Protobuf" Version="3.11.3" />
<PackageReference Include="NumSharp" Version="0.30.0-alpha" />
<PackageReference Include="Protobuf.Text" Version="0.4.0" />
</ItemGroup>

<ItemGroup>


+ 6
- 0
src/TensorFlowNET.Core/Tensors/Dimension.cs View File

@@ -22,6 +22,12 @@ namespace Tensorflow
return new Dimension(_value);
}

public static implicit operator Dimension(int value)
=> new Dimension(value);

public static implicit operator int(Dimension dimension)
=> dimension.value;

public override string ToString() => $"Dimension({_value})";
}
}

+ 411
- 0
src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs View File

@@ -0,0 +1,411 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.Linq;
using System.Numerics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
using NumSharp.Backends;
using NumSharp.Backends.Unmanaged;
using NumSharp.Utilities;
using static Tensorflow.c_api;

#if SERIALIZABLE
using Newtonsoft.Json;
#endif

namespace Tensorflow
{
[SuppressMessage("ReSharper", "InvokeAsExtensionMethod")]
public partial class Tensor
{
public T ToScalar<T>()
{
unsafe
{
if (typeof(T).as_dtype() == this.dtype && this.dtype != TF_DataType.TF_STRING)
return Unsafe.Read<T>(this.buffer.ToPointer());

switch (this.dtype)
{
#if _REGEN
%foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase%
case TF_DataType.#1:
return Converts.ChangeType<T>(*(#3*) this.buffer);
%
#else

case TF_DataType.TF_UINT8:
return Converts.ChangeType<T>(*(byte*) this.buffer);
case TF_DataType.TF_INT16:
return Converts.ChangeType<T>(*(short*) this.buffer);
case TF_DataType.TF_UINT16:
return Converts.ChangeType<T>(*(ushort*) this.buffer);
case TF_DataType.TF_INT32:
return Converts.ChangeType<T>(*(int*) this.buffer);
case TF_DataType.TF_UINT32:
return Converts.ChangeType<T>(*(uint*) this.buffer);
case TF_DataType.TF_INT64:
return Converts.ChangeType<T>(*(long*) this.buffer);
case TF_DataType.TF_UINT64:
return Converts.ChangeType<T>(*(ulong*) this.buffer);
case TF_DataType.TF_DOUBLE:
return Converts.ChangeType<T>(*(double*) this.buffer);
case TF_DataType.TF_FLOAT:
return Converts.ChangeType<T>(*(float*) this.buffer);
#endif
case TF_DataType.TF_STRING:
if (this.NDims != 0)
throw new ArgumentException($"{nameof(Tensor)} can only be scalar.");

IntPtr stringStartAddress = IntPtr.Zero;
UIntPtr dstLen = UIntPtr.Zero;

using (var status = new Status())
{
c_api.TF_StringDecode((byte*) this.buffer + 8, (UIntPtr) (this.bytesize), (byte**) &stringStartAddress, &dstLen, status);
status.Check(true);
}

var dstLenInt = checked((int) dstLen);
var value = Encoding.UTF8.GetString((byte*) stringStartAddress, dstLenInt);
if (typeof(T) == typeof(string))
return (T) (object) value;
else
return Converts.ChangeType<T>(value);

case TF_DataType.TF_COMPLEX64:
case TF_DataType.TF_COMPLEX128:
default:
throw new NotSupportedException();
}
}
}

public unsafe void CopyTo(NDArray nd)
{
if (!nd.Shape.IsContiguous)
throw new ArgumentException("NDArray has to be contiguous (ndarray.Shape.IsContiguous).");

#if _REGEN
#region Compute
switch (nd.typecode)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1:
{
CopyTo<#2>(new Span<#2>(nd.Unsafe.Address, nd.size*nd.dtypesize));
break;
}
%
default:
throw new NotSupportedException();
}
#endregion
#else

#region Compute

switch (nd.typecode)
{
case NPTypeCode.Boolean:
{
CopyTo<bool>(new Span<bool>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Byte:
{
CopyTo<byte>(new Span<byte>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Int16:
{
CopyTo<short>(new Span<short>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.UInt16:
{
CopyTo<ushort>(new Span<ushort>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Int32:
{
CopyTo<int>(new Span<int>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.UInt32:
{
CopyTo<uint>(new Span<uint>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Int64:
{
CopyTo<long>(new Span<long>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.UInt64:
{
CopyTo<ulong>(new Span<ulong>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Char:
{
CopyTo<char>(new Span<char>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Double:
{
CopyTo<double>(new Span<double>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
case NPTypeCode.Single:
{
CopyTo<float>(new Span<float>(nd.Unsafe.Address, nd.size * nd.dtypesize));
break;
}
default:
throw new NotSupportedException();
}

#endregion
#endif
}

public void CopyTo<T>(Span<T> destination) where T : unmanaged
{
unsafe
{
var len = checked((int) this.size);
//perform regular CopyTo using Span.CopyTo.
if (typeof(T).as_dtype() == this.dtype && this.dtype != TF_DataType.TF_STRING) //T can't be a string but tensor can.
{
var src = (T*) this.buffer;
var srcSpan = new Span<T>(src, len);
srcSpan.CopyTo(destination);

return;
}

if (len > destination.Length)
throw new ArgumentException("Destinion was too short to perform CopyTo.");

//Perform cast to type <T>.
fixed (T* dst = destination)
{
switch (this.dtype)
{
#if _REGEN
%foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase%
case TF_DataType.#1:
{
var converter = Converts.FindConverter<#3, T>();
var src = (#3*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
%
#else
case TF_DataType.TF_BOOL:
{
var converter = Converts.FindConverter<bool, T>();
var src = (bool*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT8:
{
var converter = Converts.FindConverter<byte, T>();
var src = (byte*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT16:
{
var converter = Converts.FindConverter<short, T>();
var src = (short*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT16:
{
var converter = Converts.FindConverter<ushort, T>();
var src = (ushort*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT32:
{
var converter = Converts.FindConverter<int, T>();
var src = (int*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT32:
{
var converter = Converts.FindConverter<uint, T>();
var src = (uint*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT64:
{
var converter = Converts.FindConverter<long, T>();
var src = (long*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT64:
{
var converter = Converts.FindConverter<ulong, T>();
var src = (ulong*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_DOUBLE:
{
var converter = Converts.FindConverter<double, T>();
var src = (double*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_FLOAT:
{
var converter = Converts.FindConverter<float, T>();
var src = (float*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
#endif
case TF_DataType.TF_STRING:
{
var src = this.StringData();
var culture = CultureInfo.InvariantCulture;

//pin to prevent GC from moving the span around.
fixed (T* _ = destination)
switch (typeof(T).as_dtype())
{
#if _REGEN
%foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase%
case TF_DataType.#1: {
var sdst = (#3*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).To#2(culture);
return;
}
%
#else
case TF_DataType.TF_BOOL:
{
var sdst = (bool*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToBoolean(culture);
return;
}
case TF_DataType.TF_UINT8:
{
var sdst = (byte*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToByte(culture);
return;
}
case TF_DataType.TF_INT16:
{
var sdst = (short*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToInt16(culture);
return;
}
case TF_DataType.TF_UINT16:
{
var sdst = (ushort*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToUInt16(culture);
return;
}
case TF_DataType.TF_INT32:
{
var sdst = (int*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToInt32(culture);
return;
}
case TF_DataType.TF_UINT32:
{
var sdst = (uint*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToUInt32(culture);
return;
}
case TF_DataType.TF_INT64:
{
var sdst = (long*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToInt64(culture);
return;
}
case TF_DataType.TF_UINT64:
{
var sdst = (ulong*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToUInt64(culture);
return;
}
case TF_DataType.TF_DOUBLE:
{
var sdst = (double*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToDouble(culture);
return;
}
case TF_DataType.TF_FLOAT:
{
var sdst = (float*) Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible) src[i]).ToSingle(culture);
return;
}
#endif
default:
throw new NotSupportedException();
}
}
case TF_DataType.TF_COMPLEX64:
case TF_DataType.TF_COMPLEX128:
default:
throw new NotSupportedException();
}
}
}
}
}
}

+ 0
- 14
src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs View File

@@ -7,20 +7,6 @@ namespace Tensorflow
{
public partial class Tensor
{
/// <summary>
/// Issue unresolved, will cause name_scope problem.
/// </summary>
/// <param name="scalar"></param>
/*public static implicit operator Tensor(double scalar)
{
return constant_op.constant(scalar);
}*/

/*public static implicit operator Tensor(int scalar)
{
return constant_op.constant(scalar);
}*/

public static implicit operator IntPtr(Tensor tensor)
{
if (tensor._handle == IntPtr.Zero)


+ 2
- 3
src/TensorFlowNET.Core/Tensors/Tensor.cs View File

@@ -162,9 +162,9 @@ namespace Tensorflow
using (var status = new Status())
{
if (value == null)
c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status);
c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), null, -1, status);
else
c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, status);
c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, status);

status.Check(true);
}
@@ -257,7 +257,6 @@ namespace Tensorflow
/// </summary>
/// <typeparam name="T"></typeparam>
/// <returns></returns>
/// <exception cref="ArgumentException">When <typeparam name="T"> is string </typeparam></exception>
public T[] ToArray<T>() where T : unmanaged
{
//Are the types matching?


+ 1
- 1
src/TensorFlowNET.Core/Tensors/TensorShape.cs View File

@@ -154,7 +154,7 @@ namespace Tensorflow
[SuppressMessage("ReSharper", "ParameterHidesMember")]
public TensorShape with_rank_at_least(int rank)
{
if (rank != ndim)
if (ndim < rank)
throw new ValueError($"Shape {this} must have rank at least {rank}");
else
return this;


+ 1
- 1
src/TensorFlowNET.Core/Tensors/dtypes.cs View File

@@ -138,7 +138,7 @@ namespace Tensorflow
dtype = TF_DataType.TF_INT8;
break;
case "Byte":
dtype = TF_DataType.TF_UINT8;
dtype = dtype ?? TF_DataType.TF_UINT8;
break;
case "Int16":
dtype = TF_DataType.TF_INT16;


+ 2
- 1
src/TensorFlowNET.Core/Tensors/tensor_util.cs View File

@@ -18,6 +18,7 @@ using NumSharp;
using System;
using System.Linq;
using NumSharp.Utilities;
using System.Text;

namespace Tensorflow
{
@@ -256,7 +257,7 @@ namespace Tensorflow
nd = np.array(doubleVals);
break;
case string strVal:
nd = NDArray.FromString(strVal);
nd = new NDArray(Encoding.ASCII.GetBytes(strVal));
break;
case string[] strVals:
nd = strVals;


+ 8
- 6
src/TensorFlowNET.Core/Training/Saving/Saver.cs View File

@@ -14,10 +14,12 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow
@@ -170,7 +172,7 @@ namespace Tensorflow
{
if (string.IsNullOrEmpty(latest_filename))
latest_filename = "checkpoint";
object model_checkpoint_path = "";
NDArray[] model_checkpoint_path = null;
string checkpoint_file = "";

if (global_step > 0)
@@ -183,15 +185,15 @@ namespace Tensorflow
if (!_is_empty)
{
model_checkpoint_path = sess.run(_saver_def.SaveTensorName,
new FeedItem(_saver_def.FilenameTensorName, checkpoint_file)
);
(_saver_def.FilenameTensorName, checkpoint_file));

if (write_state)
{
_RecordLastCheckpoint(model_checkpoint_path.ToString());
var path = UTF8Encoding.UTF8.GetString((byte[])model_checkpoint_path[0]);
_RecordLastCheckpoint(path);
checkpoint_management.update_checkpoint_state_internal(
save_dir: save_path_parent,
model_checkpoint_path: model_checkpoint_path.ToString(),
model_checkpoint_path: path,
all_model_checkpoint_paths: _last_checkpoints.Keys.Select(x => x).ToList(),
latest_filename: latest_filename,
save_relative_paths: _save_relative_paths);
@@ -205,7 +207,7 @@ namespace Tensorflow
export_meta_graph(meta_graph_filename, strip_default_attrs: strip_default_attrs, save_debug_info: save_debug_info);
}

return _is_empty ? string.Empty : model_checkpoint_path.ToString();
return _is_empty ? string.Empty : UTF8Encoding.UTF8.GetString((byte[])model_checkpoint_path[0]);
}

public (Saver, object) import_meta_graph(string meta_graph_or_file,


+ 20
- 21
src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs View File

@@ -20,6 +20,7 @@ using System.IO;
using System.Linq;
using static Tensorflow.SaverDef.Types;
using static Tensorflow.Binding;
using Protobuf.Text;

namespace Tensorflow
{
@@ -44,8 +45,7 @@ namespace Tensorflow
float? last_preserved_timestamp = null
)
{
CheckpointState ckpt = null;

CheckpointState ckpt = null;
// Writes the "checkpoint" file for the coordinator for later restoration.
string coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename);
if (save_relative_paths)
@@ -65,7 +65,14 @@ namespace Tensorflow
throw new RuntimeError($"Save path '{model_checkpoint_path}' conflicts with path used for " +
"checkpoint state. Please use a different save path.");

File.WriteAllText(coord_checkpoint_filename, ckpt.ToString());
// File.WriteAllText(coord_checkpoint_filename, ckpt.ToString());
var checkpoints = new List<string>
{
$"model_checkpoint_path: \"{ckpt.ModelCheckpointPath}\""
};
checkpoints.AddRange(all_model_checkpoint_paths.Select(x => $"all_model_checkpoint_paths: \"{x}\""));

File.WriteAllLines(coord_checkpoint_filename, checkpoints);
}

/// <summary>
@@ -98,7 +105,14 @@ namespace Tensorflow
all_model_checkpoint_paths.Add(model_checkpoint_path);

// Relative paths need to be rewritten to be relative to the "save_dir"
// if model_checkpoint_path already contains "save_dir".
if (model_checkpoint_path.StartsWith(save_dir))
{
model_checkpoint_path = model_checkpoint_path.Substring(save_dir.Length + 1);
all_model_checkpoint_paths = all_model_checkpoint_paths
.Select(x => x.Substring(save_dir.Length + 1))
.ToList();
}

var coord_checkpoint_proto = new CheckpointState()
{
@@ -174,24 +188,9 @@ namespace Tensorflow
var coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir, latest_filename);
if (File.Exists(coord_checkpoint_filename))
{
var file_content = File.ReadAllLines(coord_checkpoint_filename);
var file_content = File.ReadAllText(coord_checkpoint_filename);
// https://github.com/protocolbuffers/protobuf/issues/6654
// var ckpt = CheckpointState.Parser.ParseFrom(file_content);
var ckpt = new CheckpointState();
var field = CheckpointState.Descriptor.FindFieldByName("model_checkpoint_path");
ckpt.ModelCheckpointPath = file_content.FirstOrDefault(x => x.StartsWith(field.Name + ":")).Substring(field.Name.Length + 2);
// remove first and last quote.
ckpt.ModelCheckpointPath = ckpt.ModelCheckpointPath.Substring(1, ckpt.ModelCheckpointPath.Length - 2);

field = CheckpointState.Descriptor.FindFieldByName("all_model_checkpoint_paths");
file_content.Where(x => x.StartsWith(field.Name + ":"))
.ToList()
.ForEach(x =>
{
string value = x.Substring(field.Name.Length + 2);
ckpt.AllModelCheckpointPaths.Add(value.Substring(1, value.Length - 2));
});

var ckpt = CheckpointState.Parser.ParseText(file_content);
if (string.IsNullOrEmpty(ckpt.ModelCheckpointPath))
throw new ValueError($"Invalid checkpoint state loaded from {checkpoint_dir}");
// For relative model_checkpoint_path and all_model_checkpoint_paths,


+ 36
- 3
src/TensorFlowNET.Core/Training/Saving/saver.py.cs View File

@@ -14,9 +14,12 @@
limitations under the License.
******************************************************************************/

using Google.Protobuf;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using static Tensorflow.Binding;

namespace Tensorflow
{
@@ -29,14 +32,12 @@ namespace Tensorflow
{
var meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file);

var meta = meta_graph.import_scoped_meta_graph_with_return_elements(
var (imported_vars, imported_return_elements) = meta_graph.import_scoped_meta_graph_with_return_elements(
meta_graph_def,
clear_devices: clear_devices,
import_scope: import_scope,
return_elements: return_elements);

var (imported_vars, imported_return_elements) = meta;

var saver = _create_saver_from_imported_meta_graph(
meta_graph_def, import_scope, imported_vars);

@@ -83,5 +84,37 @@ namespace Tensorflow
}
}
}

public static string freeze_graph(string checkpoint_dir,
string output_pb_name,
string[] output_node_names)
{
var checkpoint = checkpoint_management.latest_checkpoint(checkpoint_dir);
if (!File.Exists($"{checkpoint}.meta")) return null;

string output_pb = Path.GetFullPath(Path.Combine(checkpoint_dir, "../", $"{output_pb_name}.pb"));

using (var graph = tf.Graph())
using (var sess = tf.Session(graph))
{
var saver = tf.train.import_meta_graph($"{checkpoint}.meta", clear_devices: true);
saver.restore(sess, checkpoint);
var output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
graph.as_graph_def(),
output_node_names);
Console.WriteLine($"Froze {output_graph_def.Node.Count} nodes.");
File.WriteAllBytes(output_pb, output_graph_def.ToByteArray());
return output_pb;
}
}

public static Graph load_graph(string freeze_graph_pb, string name = "")
{
var bytes = File.ReadAllBytes(freeze_graph_pb);
var graph = tf.Graph().as_default();
importer.import_graph_def(GraphDef.Parser.ParseFrom(bytes),
name: name);
return graph;
}
}
}

+ 0
- 8
src/TensorFlowNET.Core/Util/nest.py.cs View File

@@ -526,14 +526,6 @@ namespace Tensorflow.Util
return pack_sequence_as(structure, mapped_flat_structure) as Tensor;
}
public static Tensor map_structure2<T>(Func<T, Tensor> func, T structure)
{
var flat_structure = flatten(structure);
var mapped_flat_structure = flat_structure.Select(func).ToList();
return pack_sequence_as(structure, mapped_flat_structure) as Tensor;
}
/// <summary>
/// Same as map_structure, but with only one structure (no combining of multiple structures)
/// </summary>


+ 5
- 0
src/TensorFlowNET.Core/Variables/VariableScope.cs View File

@@ -74,5 +74,10 @@ namespace Tensorflow
aggregation: aggregation) as RefVariable;
});
}

public void reuse_variables()
{
_reuse = _ReuseMode.AUTO_REUSE;
}
}
}

+ 1
- 0
src/TensorFlowNET.Core/Variables/_ReuseMode.cs View File

@@ -5,6 +5,7 @@
/// </summary>
public enum _ReuseMode
{
NOT_REUSE = 0,
// Indicates that variables are to be fetched if they already exist or
// otherwise created.
AUTO_REUSE = 1


+ 0
- 1
src/TensorFlowNET.Core/Variables/variables.py.cs View File

@@ -61,7 +61,6 @@ namespace Tensorflow
public static List<VariableV1> global_variables(string scope = null)
{
return ops.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES, scope);

}

/// <summary>


+ 0
- 60
src/TensorFlowNET.Core/ops.cs View File

@@ -28,10 +28,6 @@ namespace Tensorflow
{
public partial class ops
{
private static readonly ThreadLocal<DefaultGraphStack> _defaultGraphFactory = new ThreadLocal<DefaultGraphStack>(() => new DefaultGraphStack());

public static DefaultGraphStack default_graph_stack => _defaultGraphFactory.Value;

public static int tensor_id(Tensor tensor)
{
return tensor.Id;
@@ -78,53 +74,6 @@ namespace Tensorflow
return get_default_graph().get_collection_ref<T>(key);
}

/// <summary>
/// Returns the default graph for the current thread.
///
/// The returned graph will be the innermost graph on which a
/// `Graph.as_default()` context has been entered, or a global default
/// graph if none has been explicitly created.
///
/// NOTE: The default graph is a property of the current thread.If you
/// create a new thread, and wish to use the default graph in that
/// thread, you must explicitly add a `with g.as_default():` in that
/// thread's function.
/// </summary>
/// <returns></returns>
public static Graph get_default_graph()
{
//TODO: original source indicates there should be a _default_graph_stack!
//return _default_graph_stack.get_default()
return default_graph_stack.get_controller();
}

public static Graph set_default_graph(Graph graph)
{
//TODO: original source does not have a 'set_default_graph' and indicates there should be a _default_graph_stack!
default_graph_stack.set_controller(graph);
return default_graph_stack.get_controller();
}

/// <summary>
/// Clears the default graph stack and resets the global default graph.
///
/// NOTE: The default graph is a property of the current thread.This
/// function applies only to the current thread.Calling this function while
/// a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
/// behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
/// after calling this function will result in undefined behavior.
/// </summary>
/// <returns></returns>
public static void reset_default_graph()
{
//TODO: original source indicates there should be a _default_graph_stack!
//if (!_default_graph_stack.is_cleared())
// throw new InvalidOperationException("Do not use tf.reset_default_graph() to clear " +
// "nested graphs. If you need a cleared graph, " +
// "exit the nesting and create a new graph.");
default_graph_stack.reset();
}

public static Graph _get_graph_from_inputs(params Tensor[] op_input_list)
=> _get_graph_from_inputs(op_input_list: op_input_list, graph: null);

@@ -399,15 +348,6 @@ namespace Tensorflow
return session.run(tensor, feed_dict);
}

/// <summary>
/// Returns the default session for the current thread.
/// </summary>
/// <returns>The default `Session` being used in the current thread.</returns>
public static Session get_default_session()
{
return tf.defaultSession;
}

/// <summary>
/// Prepends name scope to a name.
/// </summary>


+ 152
- 0
src/TensorFlowNET.Core/ops.threading.cs View File

@@ -0,0 +1,152 @@
using System.Threading;
using Tensorflow.Util;
using static Tensorflow.Binding;

namespace Tensorflow
{
public partial class ops
{
private static readonly ThreadLocal<DefaultGraphStack> _defaultGraphFactory = new ThreadLocal<DefaultGraphStack>(() => new DefaultGraphStack());
private static volatile Session _singleSesson;
private static volatile DefaultGraphStack _singleGraphStack;
private static readonly object _threadingLock = new object();

public static DefaultGraphStack default_graph_stack
{
get
{
if (!isSingleThreaded)
return _defaultGraphFactory.Value;

if (_singleGraphStack == null)
{
lock (_threadingLock)
{
if (_singleGraphStack == null)
_singleGraphStack = new DefaultGraphStack();
}
}

return _singleGraphStack;
}
}

private static bool isSingleThreaded = false;

/// <summary>
/// Does this library ignore different thread accessing.
/// </summary>
/// <remarks>https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading </remarks>
public static bool IsSingleThreaded
{
get => isSingleThreaded;
set
{
if (value)
enforce_singlethreading();
else
enforce_multithreading();
}
}

/// <summary>
/// Forces the library to ignore different thread accessing.
/// </summary>
/// <remarks>https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading <br></br>Note that this discards any sessions and graphs used in a multithreaded manner</remarks>
public static void enforce_singlethreading()
{
isSingleThreaded = true;
}

/// <summary>
/// Forces the library to provide a separate <see cref="Session"/> and <see cref="Graph"/> to every different thread accessing.
/// </summary>
/// <remarks>https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading <br></br>Note that this discards any sessions and graphs used in a singlethreaded manner</remarks>
public static void enforce_multithreading()
{
isSingleThreaded = false;
}

/// <summary>
/// Returns the default session for the current thread.
/// </summary>
/// <returns>The default `Session` being used in the current thread.</returns>
public static Session get_default_session()
{
if (!isSingleThreaded)
return tf.defaultSession;

if (_singleSesson == null)
{
lock (_threadingLock)
{
if (_singleSesson == null)
_singleSesson = new Session();
}
}

return _singleSesson;
}

/// <summary>
/// Returns the default session for the current thread.
/// </summary>
/// <returns>The default `Session` being used in the current thread.</returns>
public static Session set_default_session(Session sess)
{
if (!isSingleThreaded)
return tf.defaultSession = sess;

lock (_threadingLock)
{
_singleSesson = sess;
}

return _singleSesson;
}

/// <summary>
/// Returns the default graph for the current thread.
///
/// The returned graph will be the innermost graph on which a
/// `Graph.as_default()` context has been entered, or a global default
/// graph if none has been explicitly created.
///
/// NOTE: The default graph is a property of the current thread.If you
/// create a new thread, and wish to use the default graph in that
/// thread, you must explicitly add a `with g.as_default():` in that
/// thread's function.
/// </summary>
/// <returns></returns>
public static Graph get_default_graph()
{
//return _default_graph_stack.get_default()
return default_graph_stack.get_controller();
}

public static Graph set_default_graph(Graph graph)
{
default_graph_stack.set_controller(graph);
return default_graph_stack.get_controller();
}

/// <summary>
/// Clears the default graph stack and resets the global default graph.
///
/// NOTE: The default graph is a property of the current thread.This
/// function applies only to the current thread.Calling this function while
/// a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
/// behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
/// after calling this function will result in undefined behavior.
/// </summary>
/// <returns></returns>
public static void reset_default_graph()
{
//if (!_default_graph_stack.is_cleared())
// throw new InvalidOperationException("Do not use tf.reset_default_graph() to clear " +
// "nested graphs. If you need a cleared graph, " +
// "exit the nesting and create a new graph.");
default_graph_stack.reset();
}
}
}

+ 7
- 4
src/TensorFlowNET.Core/tensorflow.cs View File

@@ -21,8 +21,6 @@ namespace Tensorflow
{
public partial class tensorflow : IObjectLife
{
protected internal readonly ThreadLocal<Session> _defaultSessionFactory;

public TF_DataType @byte = TF_DataType.TF_UINT8;
public TF_DataType @sbyte = TF_DataType.TF_INT8;
public TF_DataType int16 = TF_DataType.TF_INT16;
@@ -40,10 +38,10 @@ namespace Tensorflow

public tensorflow()
{
_defaultSessionFactory = new ThreadLocal<Session>(() => new Session());
_constructThreadingObjects();
}

public Session defaultSession => _defaultSessionFactory.Value;

public RefVariable Variable<T>(T data,
bool trainable = true,
@@ -88,6 +86,11 @@ namespace Tensorflow

public string VERSION => c_api.StringPiece(c_api.TF_Version());

public Session get_default_session()
{
return ops.get_default_session();
}

public Session Session()
{
return new Session().as_default();


+ 53
- 0
src/TensorFlowNET.Core/tensorflow.threading.cs View File

@@ -0,0 +1,53 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System.Runtime.CompilerServices;
using System.Threading;

namespace Tensorflow
{
public partial class tensorflow : IObjectLife
{
protected ThreadLocal<Session> _defaultSessionFactory;

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void _constructThreadingObjects()
{
_defaultSessionFactory = new ThreadLocal<Session>(() => new Session());
}

public Session defaultSession
{
get
{
if (!ops.IsSingleThreaded)
return _defaultSessionFactory.Value;

return ops.get_default_session();
}
internal set
{
if (!ops.IsSingleThreaded)
{
_defaultSessionFactory.Value = value;
return;
}

ops.set_default_session(value);
}
}
}
}

+ 13
- 0
src/TensorFlowNET.Hub/DataSetBase.cs View File

@@ -0,0 +1,13 @@
using System;
using System.Collections.Generic;
using System.Text;
using NumSharp;

namespace Tensorflow.Hub
{
public abstract class DataSetBase : IDataSet
{
public NDArray Data { get; protected set; }
public NDArray Labels { get; protected set; }
}
}

+ 46
- 0
src/TensorFlowNET.Hub/Datasets.cs View File

@@ -0,0 +1,46 @@
using System;
using System.Collections.Generic;
using System.Text;
using NumSharp;

namespace Tensorflow.Hub
{
public class Datasets<TDataSet> where TDataSet : IDataSet
{
public TDataSet Train { get; private set; }

public TDataSet Validation { get; private set; }

public TDataSet Test { get; private set; }

public Datasets(TDataSet train, TDataSet validation, TDataSet test)
{
Train = train;
Validation = validation;
Test = test;
}

public (NDArray, NDArray) Randomize(NDArray x, NDArray y)
{
var perm = np.random.permutation(y.shape[0]);
np.random.shuffle(perm);
return (x[perm], y[perm]);
}

/// <summary>
/// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method)
/// </summary>
/// <param name="x"></param>
/// <param name="y"></param>
/// <param name="start"></param>
/// <param name="end"></param>
/// <returns></returns>
public (NDArray, NDArray) GetNextBatch(NDArray x, NDArray y, int start, int end)
{
var slice = new Slice(start, end);
var x_batch = x[slice];
var y_batch = y[slice];
return (x_batch, y_batch);
}
}
}

+ 13
- 0
src/TensorFlowNET.Hub/IDataSet.cs View File

@@ -0,0 +1,13 @@
using System;
using System.Collections.Generic;
using System.Text;
using NumSharp;

namespace Tensorflow.Hub
{
public interface IDataSet
{
NDArray Data { get; }
NDArray Labels { get; }
}
}

+ 14
- 0
src/TensorFlowNET.Hub/IModelLoader.cs View File

@@ -0,0 +1,14 @@
using System;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Text;
using NumSharp;

namespace Tensorflow.Hub
{
public interface IModelLoader<TDataSet>
where TDataSet : IDataSet
{
Task<Datasets<TDataSet>> LoadAsync(ModelLoadSetting setting);
}
}

+ 88
- 0
src/TensorFlowNET.Hub/MnistDataSet.cs View File

@@ -0,0 +1,88 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;
using NumSharp;
using Tensorflow;

namespace Tensorflow.Hub
{
public class MnistDataSet : DataSetBase
{
public int NumOfExamples { get; private set; }
public int EpochsCompleted { get; private set; }
public int IndexInEpoch { get; private set; }

public MnistDataSet(NDArray images, NDArray labels, Type dataType, bool reshape)
{
EpochsCompleted = 0;
IndexInEpoch = 0;

NumOfExamples = images.shape[0];

images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]);
images = images.astype(dataType);
// for debug np.multiply performance
var sw = new Stopwatch();
sw.Start();
images = np.multiply(images, 1.0f / 255.0f);
sw.Stop();
Console.WriteLine($"{sw.ElapsedMilliseconds}ms");
Data = images;

labels = labels.astype(dataType);
Labels = labels;
}

public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true)
{
if (IndexInEpoch >= NumOfExamples)
IndexInEpoch = 0;

var start = IndexInEpoch;
// Shuffle for the first epoch
if(EpochsCompleted == 0 && start == 0 && shuffle)
{
var perm0 = np.arange(NumOfExamples);
np.random.shuffle(perm0);
Data = Data[perm0];
Labels = Labels[perm0];
}

// Go to the next epoch
if (start + batch_size > NumOfExamples)
{
// Finished epoch
EpochsCompleted += 1;

// Get the rest examples in this epoch
var rest_num_examples = NumOfExamples - start;
var images_rest_part = Data[np.arange(start, NumOfExamples)];
var labels_rest_part = Labels[np.arange(start, NumOfExamples)];
// Shuffle the data
if (shuffle)
{
var perm = np.arange(NumOfExamples);
np.random.shuffle(perm);
Data = Data[perm];
Labels = Labels[perm];
}

start = 0;
IndexInEpoch = batch_size - rest_num_examples;
var end = IndexInEpoch;
var images_new_part = Data[np.arange(start, end)];
var labels_new_part = Labels[np.arange(start, end)];

return (np.concatenate(new[] { images_rest_part, images_new_part }, axis: 0),
np.concatenate(new[] { labels_rest_part, labels_new_part }, axis: 0));
}
else
{
IndexInEpoch += batch_size;
var end = IndexInEpoch;
return (Data[np.arange(start, end)], Labels[np.arange(start, end)]);
}
}
}
}

+ 184
- 0
src/TensorFlowNET.Hub/MnistModelLoader.cs View File

@@ -0,0 +1,184 @@
using System;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Text;
using System.IO;
using NumSharp;

namespace Tensorflow.Hub
{
public class MnistModelLoader : IModelLoader<MnistDataSet>
{
private const string DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/";
private const string TRAIN_IMAGES = "train-images-idx3-ubyte.gz";
private const string TRAIN_LABELS = "train-labels-idx1-ubyte.gz";
private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz";
private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz";

public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null, bool showProgressInConsole = false)
{
var loader = new MnistModelLoader();

var setting = new ModelLoadSetting
{
TrainDir = trainDir,
OneHot = oneHot,
ShowProgressInConsole = showProgressInConsole
};

if (trainSize.HasValue)
setting.TrainSize = trainSize.Value;

if (validationSize.HasValue)
setting.ValidationSize = validationSize.Value;

if (testSize.HasValue)
setting.TestSize = testSize.Value;

return await loader.LoadAsync(setting);
}

public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting)
{
if (setting.TrainSize.HasValue && setting.ValidationSize >= setting.TrainSize.Value)
throw new ArgumentException("Validation set should be smaller than training set");

var sourceUrl = setting.SourceUrl;

if (string.IsNullOrEmpty(sourceUrl))
sourceUrl = DEFAULT_SOURCE_URL;

// load train images
await this.DownloadAsync(sourceUrl + TRAIN_IMAGES, setting.TrainDir, TRAIN_IMAGES, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

await this.UnzipAsync(Path.Combine(setting.TrainDir, TRAIN_IMAGES), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

var trainImages = ExtractImages(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TRAIN_IMAGES)), limit: setting.TrainSize);

// load train labels
await this.DownloadAsync(sourceUrl + TRAIN_LABELS, setting.TrainDir, TRAIN_LABELS, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

await this.UnzipAsync(Path.Combine(setting.TrainDir, TRAIN_LABELS), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

var trainLabels = ExtractLabels(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TRAIN_LABELS)), one_hot: setting.OneHot, limit: setting.TrainSize);

// load test images
await this.DownloadAsync(sourceUrl + TEST_IMAGES, setting.TrainDir, TEST_IMAGES, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

await this.UnzipAsync(Path.Combine(setting.TrainDir, TEST_IMAGES), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

var testImages = ExtractImages(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TEST_IMAGES)), limit: setting.TestSize);

// load test labels
await this.DownloadAsync(sourceUrl + TEST_LABELS, setting.TrainDir, TEST_LABELS, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

await this.UnzipAsync(Path.Combine(setting.TrainDir, TEST_LABELS), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole)
.ShowProgressInConsole(setting.ShowProgressInConsole);

var testLabels = ExtractLabels(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TEST_LABELS)), one_hot: setting.OneHot, limit: setting.TestSize);

var end = trainImages.shape[0];

var validationSize = setting.ValidationSize;

var validationImages = trainImages[np.arange(validationSize)];
var validationLabels = trainLabels[np.arange(validationSize)];
trainImages = trainImages[np.arange(validationSize, end)];
trainLabels = trainLabels[np.arange(validationSize, end)];

var dtype = setting.DataType;
var reshape = setting.ReShape;

var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape);
var test = new MnistDataSet(testImages, testLabels, dtype, reshape);

return new Datasets<MnistDataSet>(train, validation, test);
}

/// <summary>
/// Reads an MNIST image file (idx3-ubyte format) and returns the pixel data as
/// an NDArray of shape (num_images, rows, cols, 1) with byte values.
/// </summary>
/// <param name="file">Path to the uncompressed image file; relative paths are resolved against the app base directory.</param>
/// <param name="limit">Optional cap on the number of images to load.</param>
private NDArray ExtractImages(string file, int? limit = null)
{
    if (!Path.IsPathRooted(file))
        file = Path.Combine(AppContext.BaseDirectory, file);

    using (var bytestream = new FileStream(file, FileMode.Open))
    {
        var magic = Read32(bytestream);
        if (magic != 2051) // 2051 is the magic number of the idx3-ubyte image format
            throw new Exception($"Invalid magic number {magic} in MNIST image file: {file}");
        var num_images = Read32(bytestream);
        num_images = limit == null ? num_images : Math.Min(num_images, (int)limit);

        var rows = Read32(bytestream);
        var cols = Read32(bytestream);

        var buf = new byte[rows * cols * num_images];

        // BUGFIX: Stream.Read may return fewer bytes than requested; the old code
        // issued a single Read and ignored its return value, which could silently
        // leave the tail of the buffer zeroed. Loop until the buffer is filled.
        var offset = 0;
        while (offset < buf.Length)
        {
            var read = bytestream.Read(buf, offset, buf.Length - offset);
            if (read == 0)
                throw new EndOfStreamException($"Unexpected end of MNIST image file: {file}");
            offset += read;
        }

        var data = np.frombuffer(buf, np.@byte);
        data = data.reshape(num_images, rows, cols, 1);

        return data;
    }
}

/// <summary>
/// Reads an MNIST label file (idx1-ubyte format) and returns the labels as an
/// NDArray of bytes, optionally converted to a one-hot matrix.
/// </summary>
/// <param name="file">Path to the uncompressed label file; relative paths are resolved against the app base directory.</param>
/// <param name="one_hot">When true, returns a (num_items, num_classes) one-hot matrix instead of the raw label vector.</param>
/// <param name="num_classes">Number of classes used for one-hot encoding.</param>
/// <param name="limit">Optional cap on the number of labels to load.</param>
private NDArray ExtractLabels(string file, bool one_hot = false, int num_classes = 10, int? limit = null)
{
    if (!Path.IsPathRooted(file))
        file = Path.Combine(AppContext.BaseDirectory, file);
    using (var bytestream = new FileStream(file, FileMode.Open))
    {
        var magic = Read32(bytestream);
        if (magic != 2049) // 2049 is the magic number of the idx1-ubyte label format
            throw new Exception($"Invalid magic number {magic} in MNIST label file: {file}");
        var num_items = Read32(bytestream);
        num_items = limit == null ? num_items : Math.Min(num_items, (int)limit);
        var buf = new byte[num_items];

        // BUGFIX: Stream.Read may return fewer bytes than requested; loop until
        // the buffer is filled instead of assuming a single call reads it all.
        var offset = 0;
        while (offset < buf.Length)
        {
            var read = bytestream.Read(buf, offset, buf.Length - offset);
            if (read == 0)
                throw new EndOfStreamException($"Unexpected end of MNIST label file: {file}");
            offset += read;
        }

        var labels = np.frombuffer(buf, np.uint8);

        if (one_hot)
            return DenseToOneHot(labels, num_classes);
        return labels;
    }
}

/// <summary>
/// Converts a vector of byte class labels into a one-hot matrix of shape
/// (num_labels, num_classes): row i has 1.0 in the column given by labels_dense[i].
/// </summary>
private NDArray DenseToOneHot(NDArray labels_dense, int num_classes)
{
    var num_labels = labels_dense.shape[0];
    // removed dead code: `index_offset = np.arange(num_labels) * num_classes`
    // was computed but never used.
    var labels_one_hot = np.zeros(num_labels, num_classes);
    var labels = labels_dense.Data<byte>();
    for (int row = 0; row < num_labels; row++)
    {
        var col = labels[row];
        labels_one_hot.SetData(1.0, row, col);
    }

    return labels_one_hot;
}

/// <summary>
/// Reads one big-endian 32-bit header value from an MNIST file.
/// </summary>
private int Read32(FileStream bytestream)
{
    var buffer = new byte[sizeof(uint)];

    // BUGFIX: the return value of Read was captured but never checked; a partial
    // read would hand back a half-filled buffer. Loop until all 4 bytes arrive.
    var offset = 0;
    while (offset < buffer.Length)
    {
        var read = bytestream.Read(buffer, offset, buffer.Length - offset);
        if (read == 0)
            throw new EndOfStreamException("Unexpected end of stream while reading a 32-bit MNIST header value.");
        offset += read;
    }

    // ">u4" = big-endian unsigned 32-bit, the byte order used by the MNIST format.
    return np.frombuffer(buffer, ">u4").Data<int>()[0];
}
}
}

+ 20
- 0
src/TensorFlowNET.Hub/ModelLoadSetting.cs View File

@@ -0,0 +1,20 @@
using System;
using System.Collections.Generic;
using System.Text;
using NumSharp;

namespace Tensorflow.Hub
{
/// <summary>
/// Options controlling how an MNIST-style dataset is downloaded, split and loaded.
/// </summary>
public class ModelLoadSetting
{
/// <summary>Directory the dataset files are downloaded to and read from.</summary>
public string TrainDir { get; set; }
/// <summary>When true, labels are returned one-hot encoded.</summary>
public bool OneHot { get; set; }
/// <summary>Element type of the loaded data arrays (passed through to the dataset).</summary>
public Type DataType { get; set; } = typeof(float);
/// <summary>Reshape flag passed through to the constructed datasets.</summary>
public bool ReShape { get; set; }
/// <summary>Number of training samples reserved for the validation split.</summary>
public int ValidationSize { get; set; } = 5000;
/// <summary>Optional cap on the number of training samples to load.</summary>
public int? TrainSize { get; set; }
/// <summary>Optional cap on the number of test samples to load.</summary>
public int? TestSize { get; set; }
/// <summary>Base URL the dataset files are downloaded from (presumably; verify against the loader).</summary>
public string SourceUrl { get; set; }
/// <summary>When true, download/unzip progress is written to the console.</summary>
public bool ShowProgressInConsole { get; set; }
}
}

+ 5
- 0
src/TensorFlowNET.Hub/README.md View File

@@ -0,0 +1,5 @@
## TensorFlow Hub

TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models. In particular, it provides **modules**, which are pre-trained pieces of TensorFlow models that can be reused on new tasks.

https://github.com/tensorflow/hub

+ 27
- 0
src/TensorFlowNET.Hub/Tensorflow.Hub.csproj View File

@@ -0,0 +1,27 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<RootNamespace>Tensorflow.Hub</RootNamespace>
<TargetFramework>netstandard2.0</TargetFramework>
<Version>0.0.6</Version>
<Authors>Kerry Jiang, Haiping Chen</Authors>
<Company>SciSharp STACK</Company>
<Copyright>Apache 2.0</Copyright>
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl>
<PackageTags>TensorFlow, SciSharp, MachineLearning</PackageTags>
<Description>TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models.</Description>
<PackageId>SciSharp.TensorFlowHub</PackageId>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<PackageReleaseNotes>Fix GetNextBatch() bug.
Change to NumSharp compact version.</PackageReleaseNotes>
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
<AssemblyName>TensorFlow.Hub</AssemblyName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
<DefineConstants>DEBUG;TRACE</DefineConstants>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="NumSharp" Version="0.30.0-alpha" />
</ItemGroup>
</Project>

+ 137
- 0
src/TensorFlowNET.Hub/Utils.cs View File

@@ -0,0 +1,137 @@
using System;
using System.IO;
using System.IO.Compression;
using System.Collections.Generic;
using System.Net;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

namespace Tensorflow.Hub
{
/// <summary>
/// Extension helpers for downloading, gunzipping and console progress reporting
/// used by the model/dataset loaders.
/// </summary>
public static class Utils
{
    /// <summary>
    /// Downloads <paramref name="url"/> to the full path <paramref name="saveTo"/>
    /// (split into directory + file name and delegated to the main overload).
    /// </summary>
    public static async Task DownloadAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string url, string saveTo)
        where TDataSet : IDataSet
    {
        var dir = Path.GetDirectoryName(saveTo);
        var fileName = Path.GetFileName(saveTo);
        await modelLoader.DownloadAsync(url, dir, fileName);
    }

    /// <summary>
    /// Downloads <paramref name="url"/> into <paramref name="dirSaveTo"/> as
    /// <paramref name="fileName"/>. Skips the download when the file already exists.
    /// Relative directories are resolved against the app base directory.
    /// </summary>
    public static async Task DownloadAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string url, string dirSaveTo, string fileName, bool showProgressInConsole = false)
        where TDataSet : IDataSet
    {
        if (!Path.IsPathRooted(dirSaveTo))
            dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo);

        var fileSaveTo = Path.Combine(dirSaveTo, fileName);

        if (showProgressInConsole)
            Console.WriteLine($"Downloading {fileName}");

        if (File.Exists(fileSaveTo))
        {
            if (showProgressInConsole)
                Console.WriteLine($"The file {fileName} already exists");

            return;
        }

        Directory.CreateDirectory(dirSaveTo);

        using (var wc = new WebClient())
        {
            await wc.DownloadFileTaskAsync(url, fileSaveTo).ConfigureAwait(false);
        }
    }

    /// <summary>
    /// Decompresses a gzip file into <paramref name="saveTo"/>, naming the output
    /// after the archive without its extension. Skips the work when the target
    /// already exists (consistent with <see cref="DownloadAsync{TDataSet}(IModelLoader{TDataSet}, string, string, string, bool)"/>).
    /// </summary>
    public static async Task UnzipAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string zipFile, string saveTo, bool showProgressInConsole = false)
        where TDataSet : IDataSet
    {
        if (!Path.IsPathRooted(saveTo))
            saveTo = Path.Combine(AppContext.BaseDirectory, saveTo);

        Directory.CreateDirectory(saveTo);

        if (!Path.IsPathRooted(zipFile))
            zipFile = Path.Combine(AppContext.BaseDirectory, zipFile);

        var destFileName = Path.GetFileNameWithoutExtension(zipFile);
        var destFilePath = Path.Combine(saveTo, destFileName);

        if (showProgressInConsole)
            Console.WriteLine($"Unzipping {Path.GetFileName(zipFile)}"); // fixed typo "Unzippinng"

        if (File.Exists(destFilePath))
        {
            if (showProgressInConsole)
                Console.WriteLine($"The file {destFileName} already exists");

            // BUGFIX: previously this fell through and re-extracted over the
            // existing file despite printing "already exists"; now it skips,
            // matching DownloadAsync's behavior.
            return;
        }

        // `using` disposes (and closes) both streams; explicit Close() calls removed.
        using (var unzipStream = new GZipStream(File.OpenRead(zipFile), CompressionMode.Decompress))
        using (var destStream = File.Create(destFilePath))
        {
            await unzipStream.CopyToAsync(destStream).ConfigureAwait(false);
            await destStream.FlushAsync().ConfigureAwait(false);
        }
    }

    /// <summary>
    /// Awaits <paramref name="task"/>; when <paramref name="enable"/> is true,
    /// prints a dot every ~100 ms until the task finishes, then "Done.".
    /// </summary>
    public static async Task ShowProgressInConsole(this Task task, bool enable)
    {
        if (!enable)
        {
            await task;
            return;
        }

        // BUGFIX: the CancellationTokenSource was never disposed.
        using (var cts = new CancellationTokenSource())
        {
            var showProgressTask = ShowProgressInConsole(cts);

            try
            {
                await task;
            }
            finally
            {
                // Stop the dot-printer whether the task succeeded or threw.
                cts.Cancel();
            }

            await showProgressTask;
        }

        Console.WriteLine("Done.");
    }

    /// <summary>
    /// Prints a "." every ~100 ms until cancelled, wrapping the line every 50 dots.
    /// </summary>
    private static async Task ShowProgressInConsole(CancellationTokenSource cts)
    {
        var dots = 0;

        await Task.Delay(100);

        while (!cts.IsCancellationRequested)
        {
            await Task.Delay(100);
            Console.Write(".");
            dots++;

            if (dots % 50 == 0)
            {
                Console.WriteLine();
            }
        }

        // Terminate the progress line if anything was printed.
        if (dots > 0)
            Console.WriteLine();
    }
}
}

+ 10
- 0
src/TensorFlowNET.Keras/Activations.cs View File

@@ -0,0 +1,10 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras
{
// Placeholder for Keras activation functions; no members have been ported yet.
class Activations
{
}
}

+ 35
- 0
src/TensorFlowNET.Keras/Applications/Densenet.cs View File

@@ -0,0 +1,35 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.densenet (dense/transition/conv
/// blocks and the DenseNet121/169/201 model builders). Every member currently
/// throws <see cref="NotImplementedException"/>; signatures are placeholders.
/// </summary>
public class Densenet
{
public static Tensor dense_block(Tensor x, int blocks, string name) => throw new NotImplementedException();

public static Tensor transition_block(Tensor x, float reduction, string name) => throw new NotImplementedException();

public static Tensor conv_block(Tensor x, float growth_rate, string name) => throw new NotImplementedException();

public static Model DenseNet(int blocks, bool include_top=true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model DenseNet121(int blocks, bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model DenseNet169(int blocks, bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model DenseNet201(int blocks, bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 60
- 0
src/TensorFlowNET.Keras/Applications/Efficientnet.cs View File

@@ -0,0 +1,60 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
// Placeholder for EfficientNet per-block hyperparameters; fields not yet ported.
public class BlockArg
{

}

/// <summary>
/// Stub API modeled after tf.keras.applications.efficientnet (the generic
/// EfficientNet builder, the MBConv `block`, and the B0–B7 variants).
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Efficientnet
{
public static Model EfficientNet(float width_coefficient, float depth_coefficient, int default_size, float dropout_rate = 0.2f,
float drop_connect_rate = 0.2f, int depth_divisor = 8, string activation = "swish",
BlockArg[] blocks_args = null, string model_name = "efficientnet", bool include_top = true,
string weights = "imagenet", Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor block(Tensor inputs, string activation= "swish", float drop_rate= 0f,string name= "",
int filters_in= 32, int filters_out= 16, int kernel_size= 3, int strides= 1,
int expand_ratio= 1, float se_ratio= 0, bool id_skip= true) => throw new NotImplementedException();

public static Model EfficientNetB0(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB1(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB2(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB3(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB4(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB5(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB6(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model EfficientNetB7(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 22
- 0
src/TensorFlowNET.Keras/Applications/ImagenetUtils.cs View File

@@ -0,0 +1,22 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after keras.applications.imagenet_utils: shared input
/// preprocessing, prediction decoding, and input-shape validation helpers.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class ImagenetUtils
{
public static Tensor preprocess_input(Tensor x, string data_format= null, string mode= "caffe") => throw new NotImplementedException();
public static Tensor decode_predictions(Tensor preds, int top= 5) => throw new NotImplementedException();

public static Tensor _preprocess_numpy_input(Tensor x, string data_format, string mode) => throw new NotImplementedException();

public static Tensor _preprocess_symbolic_input(Tensor x, string data_format, string mode) => throw new NotImplementedException();

public static TensorShape obtain_input_shape(TensorShape input_shape, int default_size, int min_size,
string data_format, bool require_flatten, string weights= null) => throw new NotImplementedException();

public static ((int, int), (int, int)) correct_pad(Tensor inputs, (int, int) kernel_size) => throw new NotImplementedException();
}
}

+ 22
- 0
src/TensorFlowNET.Keras/Applications/InceptionResnetV2.cs View File

@@ -0,0 +1,22 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.inception_resnet_v2.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class InceptionResnetV2
{
public static Model InceptionResNetV2(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor conv2d_bn(Tensor x, int filters, (int, int) kernel_size, (int, int) strides, string padding= "same",
string activation= "relu", bool use_bias= false, string name= null) => throw new NotImplementedException();

public static Tensor inception_resnet_block(Tensor x, float scale, string block_type, int block_idx, string activation= "relu") => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 19
- 0
src/TensorFlowNET.Keras/Applications/InceptionV3.cs View File

@@ -0,0 +1,19 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.inception_v3.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// NOTE(review): the builder is named "Inceptionv3" (lowercase v), inconsistent
/// with the class name "InceptionV3"; renaming would break callers, so it is
/// only flagged here.
/// </summary>
public class InceptionV3
{
public static Model Inceptionv3(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor conv2d_bn(Tensor x, int filters, int num_row, int num_col, string padding = "same", (int, int)? strides = null, string name = null) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 18
- 0
src/TensorFlowNET.Keras/Applications/Mobilenet.cs View File

@@ -0,0 +1,18 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.mobilenet.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Mobilenet
{
public static Model MobileNet(TensorShape input_shape= null, float alpha= 1.0f, int depth_multiplier= 1, float dropout= 1e-3f,
bool include_top= true, string weights= "imagenet", Tensor input_tensor= null, string pooling= null, int classes= 1000) => throw new NotImplementedException();

public static Tensor conv2d_bn(Tensor x, int filters, float alpha, (int, int)? kernel = null, (int, int)? strides = null) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 21
- 0
src/TensorFlowNET.Keras/Applications/MobilenetV2.cs View File

@@ -0,0 +1,21 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.mobilenet_v2 (model builder and
/// the inverted-residual block helper).
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class MobilenetV2
{
public static Model MobileNetV2(TensorShape input_shape = null, float alpha = 1.0f, bool include_top = true,
string weights = "imagenet", Tensor input_tensor = null, string pooling = null,
int classes = 1000) => throw new NotImplementedException();

public static Tensor _inverted_res_block(Tensor inputs, int expansion, (int, int) stride, float alpha, int filters, string block_id) => throw new NotImplementedException();

public static Tensor _make_divisible(Tensor v, Tensor divisor, Tensor min_value= null) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 31
- 0
src/TensorFlowNET.Keras/Applications/Nasnet.cs View File

@@ -0,0 +1,31 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.nasnet (NASNet builder, the
/// Mobile/Large variants, and the cell/block helpers).
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Nasnet
{
public static Model NASNet(TensorShape input_shape = null, int penultimate_filters = 4032, int num_blocks = 6, int stem_block_filters = 96,
bool skip_reduction = true, int filter_multiplier = 2, bool include_top = true, string weights = null,
Tensor input_tensor = null, string pooling = null, int classes = 1000, int? default_size = null) => throw new NotImplementedException();

public static Model NASNetMobile(TensorShape input_shape = null, bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model NASNetLarge(TensorShape input_shape = null, bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor _separable_conv_block(Tensor ip, int filters, (int, int)? kernel_size= null, (int, int)? strides= null, string block_id= null) => throw new NotImplementedException();

public static Tensor _adjust_block(Tensor p, Tensor ip, int filters, string block_id= null) => throw new NotImplementedException();

public static Tensor _normal_a_cell(Tensor p, Tensor ip, int filters, string block_id = null) => throw new NotImplementedException();

public static Tensor _reduction_a_cell(Tensor p, Tensor ip, int filters, string block_id = null) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 41
- 0
src/TensorFlowNET.Keras/Applications/Resnet.cs View File

@@ -0,0 +1,41 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.resnet: the generic ResNet
/// builder, the residual block/stack helpers (v1, v2, ResNeXt-style groups),
/// and the ResNet50/101/152 variants.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Resnet
{
public static Model ResNet(Func<Tensor, Tensor> stack_fn, bool preact, bool use_bias, string model_name= "resnet", bool include_top= true,
string weights= "imagenet", Tensor input_tensor= null, TensorShape input_shape= null, string pooling= null,
int classes= 1000) => throw new NotImplementedException();

public static Tensor block1(Tensor x, int filters, int kernel_size= 3, int stride= 1, bool conv_shortcut= true, string name= null) => throw new NotImplementedException();

public static Tensor stack1(Tensor x, int filters, int blocks, int stride1 = 2, string name = null) => throw new NotImplementedException();

public static Tensor block2(Tensor x, int filters, int kernel_size = 3, int stride = 1, bool conv_shortcut = true, string name = null) => throw new NotImplementedException();

public static Tensor stack2(Tensor x, int filters, int blocks, int stride1 = 2, string name = null) => throw new NotImplementedException();

public static Tensor block3(Tensor x, int filters, int kernel_size = 3, int stride = 1, int groups = 32, bool conv_shortcut = true, string name = null) => throw new NotImplementedException();

public static Tensor stack3(Tensor x, int filters, int blocks, int stride1 = 2, int groups = 32, string name = null) => throw new NotImplementedException();

public static Model ResNet50(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model ResNet101(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model ResNet152(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 25
- 0
src/TensorFlowNET.Keras/Applications/ResnetV2.cs View File

@@ -0,0 +1,25 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.resnet_v2 (pre-activation
/// ResNet50V2/101V2/152V2 variants).
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class ResnetV2
{
public static Model ResNet50V2(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model ResNet101V2(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Model ResNet152V2(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 17
- 0
src/TensorFlowNET.Keras/Applications/Vgg16.cs View File

@@ -0,0 +1,17 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.vgg16.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Vgg16
{
public static Model VGG16(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

+ 17
- 0
src/TensorFlowNET.Keras/Applications/Vgg19.cs View File

@@ -0,0 +1,17 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Keras.Applications
{
/// <summary>
/// Stub API modeled after tf.keras.applications.vgg19.
/// Every member currently throws <see cref="NotImplementedException"/>.
/// </summary>
public class Vgg19
{
public static Model VGG19(bool include_top = true, string weights = "imagenet",
Tensor input_tensor = null, TensorShape input_shape = null,
string pooling = null, int classes = 1000) => throw new NotImplementedException();

public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException();

public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException();
}
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save