diff --git a/.gitignore b/.gitignore index ce600fbb..261c681a 100644 --- a/.gitignore +++ b/.gitignore @@ -336,3 +336,4 @@ test/TensorFlowNET.Examples/mnist # training model resources .resources +/redist diff --git a/README.md b/README.md index f7708caf..2a0caeb7 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,10 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr | TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.0 | | ----------- | ------- | ------- | ------- | ------ | -| tf.net 0.12 | | x | | | +| tf.net 0.14 | | x | x | | +| tf.net 0.13 | | x | x | | +| tf.net 0.12 | x | x | | | | tf.net 0.11 | x | x | | | -| tf.net 0.10 | x | x | | | -| tf.net 0.9 | x | | | | Install TF.NET and TensorFlow binary through NuGet. ```sh diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 2ad1cb5d..9efeddaa 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -3,16 +3,24 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.29102.190 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Binding", "src\TensorFlowNET.Core\TensorFlow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Benchmark", "src\TensorFlowNet.Benchmarks\Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "UnitTest", "test\TensorFlowNET.UnitTest\UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Debug|x64 = Debug|x64 + Debug-Minimal|Any CPU = Debug-Minimal|Any CPU + Debug-Minimal|x64 = Debug-Minimal|x64 Publish|Any CPU = Publish|Any CPU Publish|x64 = Publish|x64 Release|Any CPU = Release|Any CPU @@ -23,6 +31,10 @@ Global {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = 
Debug|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU @@ -35,10 +47,14 @@ Global {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU @@ -47,14 +63,66 @@ Global {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU + 
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/docs/source/ImageRecognition.md b/docs/source/ImageRecognition.md index 14d1c32b..74d3ee5b 100644 --- a/docs/source/ImageRecognition.md +++ b/docs/source/ImageRecognition.md @@ -133,4 +133,5 @@ grace_hopper.jpg: 466 bulletproof vest, 0.005350832 2/18/2019 3:56:25 AM Completed InceptionArchGoogLeNet ``` -You can find the full source code from [github](https://github.com/SciSharp/TensorFlow.NET/tree/master/test/TensorFlowNET.Examples/ImageProcess). \ No newline at end of file +You can find the full source code on [GitHub](https://github.com/SciSharp/TensorFlow.NET-Examples/tree/master/src/TensorFlowNET.Examples/ImageProcessing). + diff --git a/docs/source/MnistInRnn.md b/docs/source/MnistInRnn.md new file mode 100644 index 00000000..ce8a1390 --- /dev/null +++ b/docs/source/MnistInRnn.md @@ -0,0 +1,5 @@ +# Chapter. MNIST In RNN + +### Recurrent Neural Networks + +Recurrent Neural Networks (RNNs) are popular models that have shown great promise in sequential data classification tasks. The traditional neural network model cannot base its next prediction on knowledge that it has learned before. \ No newline at end of file diff --git a/src/TensorFlowNET.Core/APIs/tf.data_flow.cs b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs index 593596ff..3ea6a70d 100644 --- a/src/TensorFlowNET.Core/APIs/tf.data_flow.cs +++ b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs @@ -27,7 +27,19 @@ namespace Tensorflow /// /// /// - public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null) + public Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null) => gen_data_flow_ops.dynamic_stitch(indices, data, name: name); + + /// + /// Partitions `data` into `num_partitions` tensors using indices from `partitions`. + /// + /// + /// + /// The number of partitions to output. 
+ /// + /// + public Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions, + string name = null) + => gen_data_flow_ops.dynamic_partition(data, partitions, num_partitions, name: name); } } diff --git a/src/TensorFlowNET.Core/APIs/tf.graph.cs b/src/TensorFlowNET.Core/APIs/tf.graph.cs index a28c007a..05851b6b 100644 --- a/src/TensorFlowNET.Core/APIs/tf.graph.cs +++ b/src/TensorFlowNET.Core/APIs/tf.graph.cs @@ -21,7 +21,7 @@ namespace Tensorflow public partial class tensorflow { public graph_util_impl graph_util => new graph_util_impl(); - + public GraphTransformer graph_transforms => new GraphTransformer(); public GraphKeys GraphKeys { get; } = new GraphKeys(); public void reset_default_graph() diff --git a/src/TensorFlowNET.Core/APIs/tf.io.cs b/src/TensorFlowNET.Core/APIs/tf.io.cs index 394357de..40da04b1 100644 --- a/src/TensorFlowNET.Core/APIs/tf.io.cs +++ b/src/TensorFlowNET.Core/APIs/tf.io.cs @@ -23,8 +23,9 @@ namespace Tensorflow { public GFile gfile = new GFile(); public Tensor read_file(string filename, string name = null) => gen_io_ops.read_file(filename, name); + public Tensor read_file(Tensor filename, string name = null) => gen_io_ops.read_file(filename, name); - public void import_graph_def(GraphDef graph_def, + public ITensorOrOperation[] import_graph_def(GraphDef graph_def, Dictionary input_map = null, string[] return_elements = null, string name = null, diff --git a/src/TensorFlowNET.Core/APIs/tf.layers.cs b/src/TensorFlowNET.Core/APIs/tf.layers.cs index 25448441..e62d5fa2 100644 --- a/src/TensorFlowNET.Core/APIs/tf.layers.cs +++ b/src/TensorFlowNET.Core/APIs/tf.layers.cs @@ -177,7 +177,8 @@ namespace Tensorflow use_bias: use_bias, bias_initializer: bias_initializer, kernel_initializer: kernel_initializer, - trainable: trainable); + trainable: trainable, + name: name); return layer.apply(inputs).Item1; } diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 6cb43980..dec4c470 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -251,10 +251,11 @@ namespace Tensorflow /// greater than clip_value_max are set to clip_value_max. /// public Tensor clip_by_value (Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue") - => gen_ops.clip_by_value(t, clip_value_min, clip_value_max, name); + => clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name); + + public Tensor sub(Tx a, Ty b, string name = null) + => gen_math_ops.sub(a, b, name: name); - public Tensor sub(Tensor a, Tensor b) - => gen_math_ops.sub(a, b); public Tensor divide(Tensor a, Tensor b) => gen_math_ops.real_div(a, b); @@ -474,7 +475,7 @@ namespace Tensorflow public Tensor reduce_mean(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null, int? reduction_indices = null) => math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices); - public Tensor reduce_mean(Tensor[] input_tensors, int axis, bool keepdims = false, string name = null) + public Tensor reduce_mean(Tensor[] input_tensors, int? 
axis = null, bool keepdims = false, string name = null) => math_ops.reduce_mean(input_tensors, axis: axis, keepdims: keepdims, name: name); public Tensor round(Tensor x, string name = null) diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index b2ac4e94..2a6b125b 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -46,6 +46,9 @@ namespace Tensorflow return gen_nn_ops.conv2d(parameters); } + public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null) + => gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated: merge_repeated, name: name); + /// /// Computes dropout. /// @@ -112,6 +115,7 @@ namespace Tensorflow public IActivation relu() => new relu(); public IActivation swish() => new swish(); + public IActivation tanh() => new tanh(); public Tensor relu(Tensor features, string name = null) => gen_nn_ops.relu(features, name); diff --git a/src/TensorFlowNET.Core/APIs/tf.scan.cs b/src/TensorFlowNET.Core/APIs/tf.scan.cs new file mode 100644 index 00000000..5642eaaf --- /dev/null +++ b/src/TensorFlowNET.Core/APIs/tf.scan.cs @@ -0,0 +1,35 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+******************************************************************************/ + +using System; + +namespace Tensorflow +{ + public partial class tensorflow + { + public Tensor scan( + Func fn, + Tensor elems, + Tensor initializer = null, + int parallel_iterations = 10, + bool back_prop = true, + bool swap_memory = false, + bool infer_shape = true, + bool reverse = false, + string name = null) => functional_ops.scan(fn, elems, initializer, parallel_iterations, back_prop, + swap_memory, infer_shape, reverse, name); + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/APIs/tf.train.cs b/src/TensorFlowNET.Core/APIs/tf.train.cs index 862212ef..b9bc430d 100644 --- a/src/TensorFlowNET.Core/APIs/tf.train.cs +++ b/src/TensorFlowNET.Core/APIs/tf.train.cs @@ -53,6 +53,12 @@ namespace Tensorflow public string write_graph(Graph graph, string logdir, string name, bool as_text = true) => graph_io.write_graph(graph, logdir, name, as_text); + public Graph load_graph(string freeze_graph_pb) + => saver.load_graph(freeze_graph_pb); + + public string freeze_graph(string checkpoint_dir, string output_pb_name, string[] output_node_names) + => saver.freeze_graph(checkpoint_dir, output_pb_name, output_node_names); + public Saver import_meta_graph(string meta_graph_or_file, bool clear_devices = false, string import_scope = "") => saver._import_meta_graph_with_return_elements(meta_graph_or_file, diff --git a/src/TensorFlowNET.Core/Binding.FuncTools.cs b/src/TensorFlowNET.Core/Binding.FuncTools.cs index 71240c67..8705cf44 100644 --- a/src/TensorFlowNET.Core/Binding.FuncTools.cs +++ b/src/TensorFlowNET.Core/Binding.FuncTools.cs @@ -16,9 +16,6 @@ namespace Tensorflow args = arg, invoke = func }; - - public static Func partial(Func func, (Tin1, Tin2) args) - => (arg1, arg2) => func(args.Item1, args.Item2); } public class PartialFunc diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs index 31ea0d84..9f51ce2d 100644 --- a/src/TensorFlowNET.Core/Binding.Util.cs +++ b/src/TensorFlowNET.Core/Binding.Util.cs @@ -88,13 +88,16 @@ namespace Tensorflow case ICollection arr: return arr.Count; case NDArray ndArray: - return ndArray.shape[0]; + return ndArray.ndim == 0 ? 
1 : ndArray.shape[0]; case IEnumerable enumerable: return enumerable.OfType().Count(); } throw new NotImplementedException("len() not implemented for type: " + a.GetType()); } + public static float min(float a, float b) + => Math.Min(a, b); + public static T[] list(IEnumerable list) => list.ToArray(); diff --git a/src/TensorFlowNET.Core/Framework/importer.py.cs b/src/TensorFlowNET.Core/Framework/importer.cs similarity index 75% rename from src/TensorFlowNET.Core/Framework/importer.py.cs rename to src/TensorFlowNET.Core/Framework/importer.cs index b6c011c4..b4bf1c73 100644 --- a/src/TensorFlowNET.Core/Framework/importer.py.cs +++ b/src/TensorFlowNET.Core/Framework/importer.cs @@ -54,6 +54,7 @@ namespace Tensorflow input_map = _ConvertInputMapValues(name, input_map); }); + TF_ImportGraphDefResults results = null; var bytes = graph_def.ToByteString().ToArray(); using (var buffer = c_api_util.tf_buffer(bytes)) using (var scoped_options = c_api_util.ScopedTFImportGraphDefOptions()) @@ -61,9 +62,8 @@ namespace Tensorflow { _PopulateTFImportGraphDefOptions(scoped_options, prefix, input_map, return_elements); // need to create a class ImportGraphDefWithResults with IDisposal - var results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status); + results = c_api.TF_GraphImportGraphDefWithResults(graph, buffer, scoped_options, status); status.Check(true); - c_api.TF_DeleteImportGraphDefResults(results); } _ProcessNewOps(graph); @@ -71,7 +71,34 @@ namespace Tensorflow if (return_elements == null) return null; else - throw new NotImplementedException("import_graph_def return_elements"); + return _GatherReturnElements(return_elements, graph, results); + } + + private static ITensorOrOperation[] _GatherReturnElements(string[] requested_return_elements, + Graph graph, + TF_ImportGraphDefResults results) + { + var return_outputs = results.return_tensors; + var return_opers = results.return_opers; + + var combined_return_elements = new List(); + int outputs_idx = 0; + int opers_idx = 0; + foreach(var name in requested_return_elements) + { + if (name.Contains(":")) + { + combined_return_elements.append(graph.get_tensor_by_tf_output(return_outputs[outputs_idx])); + outputs_idx += 1; + } + else + { + throw new NotImplementedException("_GatherReturnElements"); + // combined_return_elements.append(graph._get_operation_by_tf_operation(return_opers[opers_idx])); + } + } + + return combined_return_elements.ToArray(); } private static void _ProcessNewOps(Graph graph) @@ -100,8 +127,29 @@ namespace Tensorflow foreach (var name in return_elements) { - throw new NotImplementedException("_PopulateTFImportGraphDefOptions"); + if(name.Contains(":")) + { + var (op_name, index) = _ParseTensorName(name); + c_api.TF_ImportGraphDefOptionsAddReturnOutput(options, op_name, index); + } + else + { + c_api.TF_ImportGraphDefOptionsAddReturnOperation(options, name); + } } + + // c_api.TF_ImportGraphDefOptionsSetValidateColocationConstraints(options, validate_colocation_constraints); + } + + private static (string, int) _ParseTensorName(string tensor_name) + { + var components = tensor_name.Split(':'); + if (components.Length == 2) + return (components[0], int.Parse(components[1])); + else if (components.Length == 1) + return (components[0], 0); + else + throw new ValueError($"Cannot convert {tensor_name} to a tensor name."); } public static Dictionary _ConvertInputMapValues(string name, Dictionary input_map) diff --git a/src/TensorFlowNET.Core/Framework/meta_graph.cs 
b/src/TensorFlowNET.Core/Framework/meta_graph.cs index d80e67b4..15847886 100644 --- a/src/TensorFlowNET.Core/Framework/meta_graph.cs +++ b/src/TensorFlowNET.Core/Framework/meta_graph.cs @@ -142,7 +142,8 @@ namespace Tensorflow break; default: - throw new NotImplementedException("import_scoped_meta_graph_with_return_elements"); + Console.WriteLine($"Cannot identify data type for collection {col.Key}. Skipping."); + break; } } @@ -267,6 +268,17 @@ namespace Tensorflow switch (graph.get_collection(key)) { + case List collection_list: + col_def.BytesList = new Types.BytesList(); + foreach (var x in collection_list) + { + if(x is RefVariable x_ref_var) + { + var proto = x_ref_var.to_proto(export_scope); + col_def.BytesList.Value.Add(proto.ToByteString()); + } + } + break; case List collection_list: col_def.BytesList = new Types.BytesList(); foreach (var x in collection_list) diff --git a/src/TensorFlowNET.Core/Framework/tensor_shape.cs b/src/TensorFlowNET.Core/Framework/tensor_shape.cs index d4e2f6cd..06d80972 100644 --- a/src/TensorFlowNET.Core/Framework/tensor_shape.cs +++ b/src/TensorFlowNET.Core/Framework/tensor_shape.cs @@ -24,6 +24,16 @@ namespace Tensorflow.Framework } } + public static Dimension dimension_at_index(TensorShape shape, int index) + { + return shape.rank < 0 ? + new Dimension(-1) : + new Dimension(shape.dims[index]); + } + + public static int dimension_value(Dimension dimension) + => dimension.value; + public static TensorShape as_shape(this Shape shape) => new TensorShape(shape.Dimensions); } diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index 74e9ef10..33c5f7c5 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -231,6 +231,33 @@ namespace Tensorflow.Gradients return new Tensor[] { x_grad, null }; } + [RegisterGradient("Split")] + public static Tensor[] _SplitGrad(Operation op, Tensor[] grads) + { + return new Tensor[] { null, array_ops.concat(list(grads), op.inputs[0]) }; + } + + [RegisterGradient("Slice")] + public static Tensor[] _SliceGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var input_vec = op.inputs[0]; + var begin_vec = op.inputs[1]; + var input_rank = array_ops.rank(input_vec); + var slice_size = array_ops.shape(op.outputs[0]); + + var shape = array_ops.stack(new Tensor[] { input_rank, new Tensor(1) }); + var before_pad = array_ops.reshape(begin_vec, shape); + var after_pad = array_ops.reshape(array_ops.shape(input_vec) - slice_size - begin_vec, shape); + var paddings = array_ops.concat(new Tensor[] { before_pad, after_pad }, 1); + return new Tensor[] + { + array_ops.pad(grad, paddings), + null, + null + }; + } + [RegisterGradient("Squeeze")] public static Tensor[] _SqueezeGrad(Operation op, Tensor[] grads) { diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index b3c620c0..1f24afb0 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -319,7 +319,7 @@ namespace Tensorflow.Gradients [RegisterGradient("Maximum")] public static Tensor[] _MaximumGrad(Operation op, Tensor[] grads) { - return _MaximumMinimumGrad(op, grads[0]); + return _MaximumMinimumGrad(true, op, grads[0]); } /// @@ -331,7 +331,7 @@ namespace Tensorflow.Gradients [RegisterGradient("Minimum")] public static Tensor[] _MinimumGrad(Operation op, Tensor[] grads) { - return _MaximumMinimumGrad(op, grads[0]); + return _MaximumMinimumGrad(false, 
op, grads[0]); } /// @@ -340,7 +340,7 @@ namespace Tensorflow.Gradients /// /// /// - private static Tensor[] _MaximumMinimumGrad(Operation op, Tensor grad) + private static Tensor[] _MaximumMinimumGrad(bool isMaximum, Operation op, Tensor grad) { var x = op.inputs[0]; var y = op.inputs[1]; @@ -349,7 +349,10 @@ namespace Tensorflow.Gradients var sy = array_ops.shape(y); var gradshape = array_ops.shape(grad); var zeros = array_ops.zeros(gradshape, gdtype); - var xmask = gen_math_ops.greater_equal(x, y); + var xmask = + isMaximum + ? gen_math_ops.greater_equal(x, y) + : gen_math_ops.less_equal(x, y); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); var xgrad = array_ops.where(xmask, grad, zeros); var ygrad = array_ops.where(xmask, zeros, grad); @@ -512,6 +515,72 @@ namespace Tensorflow.Gradients }); } + [RegisterGradient("Sqrt")] + public static Tensor[] _SqrtGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var y = op.outputs[0]; + + return tf_with(ops.control_dependencies(grads), delegate + { + y = math_ops.conj(y); + var factor = constant_op.constant(0.5f, dtype: y.dtype); + return new Tensor[] { grad * (factor * math_ops.reciprocal(y)) }; + }); + } + + [RegisterGradient("Sin")] + public static Tensor[] _SinGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var x = op.inputs[0]; + + return tf_with(ops.control_dependencies(grads), delegate + { + x = math_ops.conj(x); + return new Tensor[] { math_ops.multiply(grad, gen_math_ops.cos(x)) }; + }); + } + + [RegisterGradient("Sinh")] + public static Tensor[] _SinhGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var x = op.inputs[0]; + + return tf_with(ops.control_dependencies(grads), delegate + { + x = math_ops.conj(x); + return new Tensor[] { math_ops.multiply(grad, gen_math_ops.cosh(x)) }; + }); + } + + [RegisterGradient("Cos")] + public static Tensor[] _CosGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var x = op.inputs[0]; + + return tf_with(ops.control_dependencies(grads), delegate + { + x = math_ops.conj(x); + return new Tensor[] { math_ops.multiply(grad, -gen_math_ops.sin(x)) }; + }); + } + + [RegisterGradient("Cosh")] + public static Tensor[] _CoshGrad(Operation op, Tensor[] grads) + { + var grad = grads[0]; + var x = op.inputs[0]; + + return tf_with(ops.control_dependencies(grads), delegate + { + x = math_ops.conj(x); + return new Tensor[] { math_ops.multiply(grad, gen_math_ops.sinh(x)) }; + }); + } + [RegisterGradient("Tanh")] public static Tensor[] _TanhGrad(Operation op, Tensor[] grads) { diff --git a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs b/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs index 4891fcbb..b479ba0b 100644 --- a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs +++ b/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs @@ -26,31 +26,8 @@ namespace Tensorflow { static Dictionary> gradientFunctions = null; - /// - /// Regiter new gradient function - /// - /// operation type - /// function delegate - public static void RegisterGradientFunction(string name, Func func) + private static void RegisterFromAssembly() { - if(gradientFunctions == null) - gradientFunctions = new Dictionary>(); - - gradientFunctions[name] = func; - } - - public static void RegisterNoGradientFunction(string name) - { - if (gradientFunctions == null) - gradientFunctions = new Dictionary>(); - - gradientFunctions[name] = null; - } - - public static Func get_gradient_function(Operation op) - { - if (op.inputs 
== null) return null; - if (gradientFunctions == null) { gradientFunctions = new Dictionary>(); @@ -62,7 +39,8 @@ namespace Tensorflow foreach (var g in gradGroups) { - var methods = g.GetMethods().Where(x => x.GetCustomAttribute() != null) + var methods = g.GetMethods() + .Where(x => x.GetCustomAttribute() != null) .ToArray(); foreach (var m in methods) @@ -78,13 +56,40 @@ namespace Tensorflow } // REGISTER_NO_GRADIENT_OP - methods = g.GetMethods().Where(x => x.GetCustomAttribute() != null) + methods = g.GetMethods() + .Where(x => x.GetCustomAttribute() != null) .ToArray(); foreach (var m in methods) RegisterNoGradientFunction(m.GetCustomAttribute().Name); } } + } + + /// + /// Regiter new gradient function + /// + /// operation type + /// function delegate + public static void RegisterGradientFunction(string name, Func func) + { + RegisterFromAssembly(); + + gradientFunctions[name] = func; + } + + public static void RegisterNoGradientFunction(string name) + { + RegisterFromAssembly(); + + gradientFunctions[name] = null; + } + + public static Func get_gradient_function(Operation op) + { + if (op.inputs == null) return null; + + RegisterFromAssembly(); if (!gradientFunctions.ContainsKey(op.type)) throw new LookupError($"can't get graident function through get_gradient_function {op.type}"); diff --git a/src/TensorFlowNET.Core/GraphTransformation/GraphTransformer.cs b/src/TensorFlowNET.Core/GraphTransformation/GraphTransformer.cs new file mode 100644 index 00000000..381ff744 --- /dev/null +++ b/src/TensorFlowNET.Core/GraphTransformation/GraphTransformer.cs @@ -0,0 +1,46 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; +using Google.Protobuf; + +namespace Tensorflow +{ + public class GraphTransformer + { + /// + /// Graph Transform Tool + /// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md + /// + /// GraphDef object containing a model to be transformed + /// the model inputs + /// the model outputs + /// transform names and parameters + /// + public GraphDef TransformGraph(GraphDef input_graph_def, + string[] inputs, + string[] outputs, + string[] transforms) + { + var input_graph_def_string = input_graph_def.ToByteArray(); + var inputs_string = string.Join(",", inputs); + var outputs_string = string.Join(",", outputs); + var transforms_string = string.Join(" ", transforms); + using (var status = new Status()) + { + var buffer = new Buffer(); + var len = c_api.TransformGraphWithStringInputs(input_graph_def_string, + input_graph_def_string.Length, + inputs_string, + outputs_string, + transforms_string, + buffer, + status); + + status.Check(false); + var bytes = buffer.ToArray(); + return GraphDef.Parser.ParseFrom(bytes); + } + } + } +} diff --git a/src/TensorFlowNET.Core/GraphTransformation/c_api.transform_graph.cs b/src/TensorFlowNET.Core/GraphTransformation/c_api.transform_graph.cs new file mode 100644 index 00000000..8390d74e --- /dev/null +++ b/src/TensorFlowNET.Core/GraphTransformation/c_api.transform_graph.cs @@ -0,0 +1,33 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Runtime.InteropServices; + +namespace Tensorflow +{ + public partial class c_api + { + [DllImport(TensorFlowLibName)] + public static extern int TransformGraphWithStringInputs(byte[] graph_def_string, + int graph_def_string_len, + string inputs_string, + string outputs_string, + string transforms_string, + IntPtr output_buffer, + IntPtr status); + } +} diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs index 0b2dc0e6..d759e38d 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Import.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Import.cs @@ -60,12 +60,12 @@ namespace Tensorflow } } - /*public Graph Import(string file_path, string name = null) + public Graph ImportGraphDef(string file_path, string name = null) { as_default(); var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path)); importer.import_graph_def(graph_def, name: name); return this; - }*/ + } } } diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index 1f62295a..48420d18 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -494,6 +494,12 @@ namespace Tensorflow c_api.TF_DeleteGraph(handle); } + public Tensor get_tensor_by_tf_output(TF_Output tf_output) + { + var op = _get_operation_by_tf_operation(tf_output.oper); + return op.outputs[tf_output.index]; + } + /// /// Returns the with the given . /// This method may be called concurrently from multiple threads. 
diff --git a/src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs b/src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs index e9ad8474..71ea5306 100644 --- a/src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs +++ b/src/TensorFlowNET.Core/Graphs/TF_ImportGraphDefResults.cs @@ -3,13 +3,62 @@ using System.Runtime.InteropServices; namespace Tensorflow { - [StructLayout(LayoutKind.Sequential)] - public struct TF_ImportGraphDefResults + public class TF_ImportGraphDefResults : DisposableObject { - public IntPtr return_tensors; - public IntPtr return_nodes; + /*public IntPtr return_nodes; public IntPtr missing_unused_key_names; public IntPtr missing_unused_key_indexes; - public IntPtr missing_unused_key_names_data; + public IntPtr missing_unused_key_names_data;*/ + + public TF_ImportGraphDefResults(IntPtr handle) + { + _handle = handle; + } + + public TF_Output[] return_tensors + { + get + { + IntPtr return_output_handle = IntPtr.Zero; + int num_outputs = -1; + c_api.TF_ImportGraphDefResultsReturnOutputs(_handle, ref num_outputs, ref return_output_handle); + TF_Output[] return_outputs = new TF_Output[num_outputs]; + unsafe + { + var tf_output_ptr = (TF_Output*)return_output_handle; + for (int i = 0; i < num_outputs; i++) + return_outputs[i] = *(tf_output_ptr + i); + return return_outputs; + } + } + } + + public TF_Operation[] return_opers + { + get + { + return new TF_Operation[0]; + /*TF_Operation return_output_handle = new TF_Operation(); + int num_outputs = -1; + c_api.TF_ImportGraphDefResultsReturnOperations(_handle, ref num_outputs, ref return_output_handle); + TF_Operation[] return_outputs = new TF_Operation[num_outputs]; + unsafe + { + var tf_output_ptr = (TF_Operation*)return_output_handle; + for (int i = 0; i < num_outputs; i++) + return_outputs[i] = *(tf_output_ptr + i); + return return_outputs; + }*/ + } + } + + public static implicit operator TF_ImportGraphDefResults(IntPtr handle) + => new TF_ImportGraphDefResults(handle); + + public static implicit operator IntPtr(TF_ImportGraphDefResults results) + => results._handle; + + protected override void DisposeUnmanagedResources(IntPtr handle) + => c_api.TF_DeleteImportGraphDefResults(handle); } } diff --git a/src/TensorFlowNET.Core/Interfaces/IFromMergeVars.cs b/src/TensorFlowNET.Core/Interfaces/IFromMergeVars.cs new file mode 100644 index 00000000..2dd168e1 --- /dev/null +++ b/src/TensorFlowNET.Core/Interfaces/IFromMergeVars.cs @@ -0,0 +1,7 @@ +namespace Tensorflow +{ + public interface IFromMergeVars + { + T FromMergeVars(ITensorOrTensorArray[] mergeVars); + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Keras/Layers/Dense.cs b/src/TensorFlowNET.Core/Keras/Layers/Dense.cs index bfd6f2a5..1b481ada 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/Dense.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/Dense.cs @@ -35,10 +35,11 @@ namespace Tensorflow.Keras.Layers public Dense(int units, IActivation activation, + string name = null, bool use_bias = true, bool trainable = false, IInitializer kernel_initializer = null, - IInitializer bias_initializer = null) : base(trainable: trainable) + IInitializer bias_initializer = null) : base(trainable: trainable, name: name) { this.units = units; this.activation = activation; diff --git a/src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs b/src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs index b44595b5..fe1604cf 100644 --- a/src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs +++ b/src/TensorFlowNET.Core/Keras/Optimizers/PolynomialDecay.cs 
@@ -36,7 +36,7 @@ namespace Tensorflow.Keras.Optimizers public Tensor __call__(RefVariable step) { - tf_with(ops.name_scope(name ?? "PolynomialDecay"), scope => + return tf_with(ops.name_scope(name ?? "PolynomialDecay"), scope => { name = scope; var initial_learning_rate_tensor = ops.convert_to_tensor(initial_learning_rate, name: "initial_learning_rate"); @@ -53,10 +53,17 @@ namespace Tensorflow.Keras.Optimizers } else { - + // Make sure that the global_step used is not bigger than decay_steps. + global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps); } + + var p = tf.divide(global_step_recomp, decay_steps_recomp); + var pow = tf.pow(1 - p, power_tensor); + var m = math_ops.multiply(initial_learning_rate_tensor - end_learning_rate_tensor, pow); + return math_ops.add(m, + end_learning_rate_tensor, + name: name); }); - throw new NotImplementedException(""); } } } diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs index 39561990..26b29982 100644 --- a/src/TensorFlowNET.Core/Layers/Layer.cs +++ b/src/TensorFlowNET.Core/Layers/Layer.cs @@ -65,7 +65,9 @@ namespace Tensorflow.Layers variable_scope scope_context_manager = null; if (built) { - + scope_context_manager = tf.variable_scope(_scope, + reuse: true, + auxiliary_name_scope: false); } else { @@ -181,7 +183,7 @@ namespace Tensorflow.Layers return _current_scope.original_name_scope; } - private void _set_scope(VariableScope scope = null) + protected void _set_scope(VariableScope scope = null) { if (_scope == null) { diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs index fa7a77a6..c2e204ca 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs @@ -118,7 +118,7 @@ namespace Tensorflow.Operations Func, LoopVar> body, LoopVar loop_vars, TensorShape[] shape_invariants, - bool return_same_structure) + bool return_same_structure) where TItem : IFromMergeVars, new() { // Keep original_loop_vars to identify which are TensorArrays var original_loop_vars = loop_vars; @@ -178,7 +178,7 @@ namespace Tensorflow.Operations Func, LoopVar> body, LoopVar original_loop_vars, Tensor[] loop_vars, - TensorShape[] shape_invariants) + TensorShape[] shape_invariants) where TItem : IFromMergeVars, new() { var flat_loop_vars = nest.flatten2(original_loop_vars) .Select(x => (ITensorOrTensorArray)x) @@ -235,11 +235,9 @@ namespace Tensorflow.Operations // Build the graph for pred. 
var merge_vars_with_tensor_arrays = _convert_flows_to_tensorarrays(flat_loop_vars, merge_vars); - //var packed_vars = nest.pack_sequence_as(original_loop_vars, merge_vars_with_tensor_arrays, expand_composites: true); - var packed_vars = new LoopVar((Tensor)merge_vars_with_tensor_arrays[0], - (TItem)(object)new BodyItemInRnnWhileLoop((Tensor)merge_vars_with_tensor_arrays[1], - new[] { (TensorArray)merge_vars_with_tensor_arrays[2] }, - (Tensor)merge_vars_with_tensor_arrays[3])); + var packed_vars = new LoopVar( + (Tensor) merge_vars_with_tensor_arrays[0], + new TItem().FromMergeVars(merge_vars_with_tensor_arrays)); var pp = pred(packed_vars); var c = ops.convert_to_tensor(pp); _pivot = gen_control_flow_ops.loop_cond(c, name: "LoopCond"); diff --git a/src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs b/src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs index 0eead27d..d6162103 100644 --- a/src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs +++ b/src/TensorFlowNET.Core/Operations/Initializers/GlorotUniform.cs @@ -20,9 +20,11 @@ namespace Tensorflow.Operations.Initializers { public GlorotUniform(float scale = 1.0f, string mode = "FAN_AVG", + bool uniform = true, int? seed = null, TF_DataType dtype = TF_DataType.TF_FLOAT) : base(factor: scale, mode: mode, + uniform: uniform, seed: seed, dtype: dtype) { diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs new file mode 100644 index 00000000..3eb2ee95 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs @@ -0,0 +1,172 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using static Tensorflow.Binding; +using Tensorflow.Operations.Activation; +using Tensorflow.Keras.Engine; +using Tensorflow.Operations; + +namespace Tensorflow +{ + /// + /// Basic LSTM recurrent network cell. + /// The implementation is based on: http://arxiv.org/abs/1409.2329. + /// + public class BasicLstmCell : LayerRnnCell + { + int _num_units; + float _forget_bias; + bool _state_is_tuple; + IActivation _activation; + LSTMStateTuple _state; + VariableV1 _kernel; + VariableV1 _bias; + string _WEIGHTS_VARIABLE_NAME = "kernel"; + string _BIAS_VARIABLE_NAME = "bias"; + + /// + /// Initialize the basic LSTM cell. + /// + /// The number of units in the LSTM cell. + /// + /// + /// + /// + /// + /// + public BasicLstmCell(int num_units, float forget_bias = 1.0f, bool state_is_tuple = true, + IActivation activation = null, bool? reuse = null, string name = null, + TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: reuse, name: name, dtype: dtype) + { + input_spec = new InputSpec(ndim: 2); + _num_units = num_units; + _forget_bias = forget_bias; + _state_is_tuple = state_is_tuple; + _activation = activation; + if (_activation == null) + _activation = tf.nn.tanh(); + } + + protected override void build(TensorShape input_shape) + { + var input_depth = input_shape.dims.Last(); + var h_depth = _num_units; + _kernel = add_weight(_WEIGHTS_VARIABLE_NAME, + shape: new[] { input_depth + h_depth, 4 * _num_units }); + _bias = add_weight(_BIAS_VARIABLE_NAME, + shape: new[] { 4 * _num_units }, + initializer: tf.zeros_initializer); + built = true; + } + + public Tensor[] __call__(Tensor inputs, LSTMStateTuple state) + { + _state = state; + return base.__call__(inputs); + } + + /// + /// Long short-term memory cell (LSTM). 
+ /// + /// + /// + /// + /// + protected override Tensor[] call(Tensor inputs, Tensor training = null, Tensor state = null) + { + var one = constant_op.constant(1, dtype: dtypes.int32); + // Parameters of gates are concatenated into one multiply for efficiency. + Tensor c = null; + Tensor h = null; + if(_state_is_tuple) + (c, h) = ((Tensor)_state.c, (Tensor)_state.h); + else + { + // array_ops.split(value: state, num_or_size_splits: 2, axis: one); + throw new NotImplementedException("BasicLstmCell call"); + } + var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs, h }, 1), _kernel as RefVariable); + gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable); + + // i = input_gate, j = new_input, f = forget_gate, o = output_gate + var tensors = array_ops.split(value: gate_inputs, num_or_size_splits: 4, axis: one); + var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]); + + var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype); + // Note that using `add` and `multiply` instead of `+` and `*` gives a + // performance improvement. So using those at the cost of readability. + var new_c = gen_math_ops.add( + math_ops.multiply(c, math_ops.sigmoid(gen_math_ops.add(f, forget_bias_tensor))), + math_ops.multiply(math_ops.sigmoid(i), _activation.Activate(j))); + + var new_h = math_ops.multiply(_activation.Activate(new_c), math_ops.sigmoid(o)); + + + if (_state_is_tuple) + return new[] { new_c, new_h }; + else + return new[] { array_ops.concat(new[] { new_c, new_h }, 1) }; + } + + public override object get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) + { + if (inputs != null) + throw new NotImplementedException("get_initial_state input is not null"); + + return zero_state(batch_size, dtype); + } + + /// + /// Return zero-filled state tensor(s). 
+ /// + /// + /// + /// + private LSTMStateTuple zero_state(Tensor batch_size, TF_DataType dtype) + { + LSTMStateTuple output = null; + tf_with(ops.name_scope($"{GetType().Name}ZeroState", values: new { batch_size }), delegate + { + output = _zero_state_tensors(state_size, batch_size, dtype); + }); + + return output; + } + + private LSTMStateTuple _zero_state_tensors(object state_size, Tensor batch_size, TF_DataType dtype) + { + if (state_size is LSTMStateTuple state_size_tuple) + { + var outputs = state_size_tuple.Flatten() + .Select(x => (int)x) + .Select(s => + { + var c = rnn_cell_impl._concat(batch_size, s); + var size = array_ops.zeros(c, dtype: dtype); + + var c_static = rnn_cell_impl._concat(batch_size, s, @static: true); + size.set_shape(c_static); + + return size; + }).ToArray(); + + return new LSTMStateTuple(outputs[0], outputs[1]); + } + + throw new NotImplementedException("_zero_state_tensors"); + } + + public override object state_size + { + get + { + if (_state_is_tuple) + return new LSTMStateTuple(_num_units, _num_units); + else + return 2 * _num_units; + } + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/BasicRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs similarity index 97% rename from src/TensorFlowNET.Core/Operations/BasicRNNCell.cs rename to src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs index 69f86349..b93bea8d 100644 --- a/src/TensorFlowNET.Core/Operations/BasicRNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs @@ -16,6 +16,7 @@ using System; using Tensorflow.Keras.Engine; +using Tensorflow.Operations; using static Tensorflow.Binding; namespace Tensorflow @@ -25,7 +26,7 @@ namespace Tensorflow int _num_units; Func _activation; - public override int state_size => _num_units; + public override object state_size => _num_units; public override int output_size => _num_units; public VariableV1 _kernel; string _WEIGHTS_VARIABLE_NAME = "kernel"; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs b/src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs index 1a21326d..3d055cb1 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BodyItemInRnnWhileLoop.cs @@ -4,7 +4,7 @@ using System.Text; namespace Tensorflow.Operations { - internal class BodyItemInRnnWhileLoop : ICanBeFlattened, IPackable + internal class BodyItemInRnnWhileLoop : ICanBeFlattened, IPackable, IFromMergeVars { /// /// int32 scalar Tensor. 
@@ -19,6 +19,10 @@ namespace Tensorflow.Operations /// public Tensor state { get; set; } + public BodyItemInRnnWhileLoop() + { + } + public BodyItemInRnnWhileLoop(Tensor time, TensorArray[] output_ta_t, Tensor state) { this.time = time; @@ -45,5 +49,13 @@ namespace Tensorflow.Operations return new BodyItemInRnnWhileLoop(time, output_ta_t, state); } + + public BodyItemInRnnWhileLoop FromMergeVars(ITensorOrTensorArray[] mergeVars) + { + time = (Tensor) mergeVars[1]; + output_ta_t = new[] {(TensorArray) mergeVars[2]}; + state = (Tensor)mergeVars[3]; + return this; + } } } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/LSTMStateTuple.cs b/src/TensorFlowNET.Core/Operations/NnOps/LSTMStateTuple.cs new file mode 100644 index 00000000..f6bf5c6e --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/NnOps/LSTMStateTuple.cs @@ -0,0 +1,35 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Operations +{ + /// + /// Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state. + /// + /// Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state + /// and `h` is the output. + /// + /// Only used when `state_is_tuple=True`. + /// + public class LSTMStateTuple : ICanBeFlattened + { + public object c; + public object h; + + public LSTMStateTuple(int c, int h) + { + this.c = c; + this.h = h; + } + + public LSTMStateTuple(Tensor c, Tensor h) + { + this.c = c; + this.h = h; + } + + public object[] Flatten() + => new[] { c, h }; + } +} diff --git a/src/TensorFlowNET.Core/Operations/LayerRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs similarity index 100% rename from src/TensorFlowNET.Core/Operations/LayerRNNCell.cs rename to src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs diff --git a/src/TensorFlowNET.Core/Operations/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs similarity index 78% rename from src/TensorFlowNET.Core/Operations/RNNCell.cs rename to src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index 9902cd41..61d97cb9 100644 --- a/src/TensorFlowNET.Core/Operations/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -49,7 +49,7 @@ namespace Tensorflow /// difference between TF and Keras RNN cell. 
/// protected bool _is_tf_rnn_cell = false; - public virtual int state_size { get; } + public virtual object state_size { get; } public virtual int output_size { get; } @@ -64,7 +64,7 @@ namespace Tensorflow _is_tf_rnn_cell = true; } - public virtual Tensor get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) + public virtual object get_initial_state(Tensor inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid) { if (inputs != null) throw new NotImplementedException("get_initial_state input is not null"); @@ -78,11 +78,10 @@ namespace Tensorflow /// /// /// - public Tensor zero_state(Tensor batch_size, TF_DataType dtype) + private Tensor zero_state(Tensor batch_size, TF_DataType dtype) { Tensor output = null; - var state_size = this.state_size; - tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate + tf_with(ops.name_scope($"{GetType().Name}ZeroState", values: new { batch_size }), delegate { output = _zero_state_tensors(state_size, batch_size, dtype); }); @@ -90,20 +89,25 @@ namespace Tensorflow return output; } - private Tensor _zero_state_tensors(int state_size, Tensor batch_size, TF_DataType dtype) + private Tensor _zero_state_tensors(object state_size, Tensor batch_size, TF_DataType dtype) { - var output = nest.map_structure(s => + if(state_size is int state_size_int) { - var c = rnn_cell_impl._concat(batch_size, s); - var size = array_ops.zeros(c, dtype: dtype); + var output = nest.map_structure(s => + { + var c = rnn_cell_impl._concat(batch_size, s); + var size = array_ops.zeros(c, dtype: dtype); - var c_static = rnn_cell_impl._concat(batch_size, s, @static: true); - size.set_shape(c_static); + var c_static = rnn_cell_impl._concat(batch_size, s, @static: true); + size.set_shape(c_static); - return size; - }, state_size); + return size; + }, state_size_int); - return output; + return output; + } + + throw new NotImplementedException("_zero_state_tensors"); } } } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 48af7d58..5509ba2c 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -18,13 +18,160 @@ using NumSharp; using System; using System.Collections.Generic; using System.Linq; +using Tensorflow.Framework; using Tensorflow.Util; using static Tensorflow.Binding; namespace Tensorflow.Operations { - internal class rnn + public class rnn { + /// + /// Creates a bidirectional recurrent neural network. + /// + public static (Tensor[], LSTMStateTuple, LSTMStateTuple) static_bidirectional_rnn(BasicLstmCell cell_fw, + BasicLstmCell cell_bw, + Tensor[] inputs, + Tensor initial_state_fw = null, + Tensor initial_state_bw = null, + TF_DataType dtype = TF_DataType.DtInvalid, + Tensor sequence_length = null, + string scope = null) + { + if (inputs == null || inputs.Length == 0) + throw new ValueError("inputs must not be empty"); + + Tensor[] output_fw = null; + Tensor[] output_bw = null; + LSTMStateTuple output_state_fw = null; + LSTMStateTuple output_state_bw = null; + + tf_with(tf.variable_scope(scope ?? 
"bidirectional_rnn"), delegate + { + // Forward direction + tf_with(tf.variable_scope("fw"), fw_scope => + { + (output_fw, output_state_fw) = static_rnn( + cell_fw, + inputs, + initial_state_fw, + dtype, + sequence_length, + scope: fw_scope); + }); + + // backward direction + tf_with(tf.variable_scope("bw"), bw_scope => + { + var reversed_inputs = _reverse_seq(inputs, sequence_length); + (output_bw, output_state_bw) = static_rnn( + cell_bw, + reversed_inputs, + initial_state_bw, + dtype, + sequence_length, + scope: bw_scope); + }); + }); + + output_bw = _reverse_seq(output_bw, sequence_length); + + var flat_outputs = zip(output_fw, output_bw) + .Select(x => array_ops.concat(new[] { x.Item1, x.Item2 }, 1)) + .ToArray(); + + return (flat_outputs, output_state_fw, output_state_bw); + } + + private static Tensor[] _reverse_seq(Tensor[] input_seq, Tensor lengths) + { + if (lengths == null) + return input_seq.Reverse().ToArray(); + + throw new NotImplementedException("_reverse_seq"); + } + + public static (Tensor[], LSTMStateTuple) static_rnn(BasicLstmCell cell, + Tensor[] inputs, + Tensor initial_state, + TF_DataType dtype = TF_DataType.DtInvalid, + Tensor sequence_length = null, + VariableScope scope = null) + { + List outputs = new List(); + object state = null; + + // Create a new scope in which the caching device is either + // determined by the parent scope, or is set to place the cached + // Variable using the same placement as for the rest of the RNN. + if (scope == null) + tf_with(tf.variable_scope("rnn"), varscope => + { + throw new NotImplementedException("static_rnn"); + }); + else + tf_with(tf.variable_scope(scope), scope1 => + { + Dimension fixed_batch_size = null; + Dimension batch_size = null; + Tensor batch_size_tensor = null; + VariableScope varscope = scope1; + // Obtain the first sequence of the input + var first_input = inputs[0]; + if (first_input.TensorShape.rank != 1) + { + var input_shape = first_input.TensorShape.with_rank_at_least(2); + fixed_batch_size = input_shape.dims[0]; + var flat_inputs = nest.flatten2(inputs); + foreach (var flat_input in flat_inputs) + { + input_shape = flat_input.TensorShape.with_rank_at_least(2); + batch_size = tensor_shape.dimension_at_index(input_shape, 0); + var input_size = input_shape[1]; + fixed_batch_size.merge_with(batch_size); + foreach (var (i, size) in enumerate(input_size.dims)) + { + if (size < 0) + throw new ValueError($"Input size (dimension {i} of inputs) must be accessible via " + + "shape inference, but saw value None."); + } + } + } + else + fixed_batch_size = first_input.TensorShape.with_rank_at_least(1).dims[0]; + + if (tensor_shape.dimension_value(fixed_batch_size) >= 0) + batch_size = tensor_shape.dimension_value(fixed_batch_size); + else + batch_size_tensor = array_ops.shape(first_input)[0]; + + if (initial_state != null) + state = initial_state; + else + { + state = cell.get_initial_state(batch_size: batch_size_tensor, dtype: dtype); + } + + Tensor output = null; + if (state is LSTMStateTuple state_tuple) + { + foreach (var (time, input_) in enumerate(inputs)) + { + if (time > 0) + varscope.reuse_variables(); + if (sequence_length != null) + throw new NotImplementedException("static_rnn"); + + var results = cell.__call__(input_, state_tuple); + (output, state_tuple) = (results[1], new LSTMStateTuple(results[0], results[1])); + outputs.Add(output); + } + } + }); + + return (outputs.ToArray(), state as LSTMStateTuple); + } + public static (Tensor, Tensor) dynamic_rnn(RnnCell cell, Tensor inputs_tensor, Tensor 
sequence_length = null, Tensor initial_state = null, TF_DataType dtype = TF_DataType.DtInvalid, @@ -52,7 +199,7 @@ namespace Tensorflow.Operations if (initial_state != null) state = initial_state; else - state = cell.get_initial_state(batch_size: batch_size, dtype: dtype); + state = cell.get_initial_state(batch_size: batch_size, dtype: dtype) as Tensor; var inputs = nest.pack_sequence_as(structure: inputs_tensor, flat_sequence: flat_input); diff --git a/src/TensorFlowNET.Core/Operations/Operation.Control.cs b/src/TensorFlowNET.Core/Operations/Operation.Control.cs index c9ae7071..ba7b0829 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Control.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Control.cs @@ -52,10 +52,6 @@ namespace Tensorflow public void _set_control_flow_context(ControlFlowContext ctx) { - if (name.Contains("gradients/rnn/while/basic_rnn_cell/Tanh_grad/TanhGrad/f_acc")) - { - - } _control_flow_context = ctx; } diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 04964069..f9f2f58f 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -228,6 +228,16 @@ namespace Tensorflow public static Tensor rank(Tensor input, string name = null) => rank_internal(input, name, optimize: true); + public static Tensor rank(Tensor[] inputs, string name = null) + { + return tf_with(ops.name_scope(name, "Rank", new { inputs }), scope => + { + name = scope; + var input_tensor = ops.convert_to_tensor(inputs); + return constant_op.constant(input_tensor.NDims, dtype: tf.int32, name: name); + }); + } + public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) { return tf_with(ops.name_scope(name, "Rank", new List { input }), scope => @@ -594,6 +604,11 @@ namespace Tensorflow return gen_array_ops.concat_v2(values, axis, name: name); } + public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") + { + return gen_array_ops.concat_v2(values, axis, name: name); + } + public static Tensor concat(object[] values, int axis, string name = "concat") { return gen_array_ops.concat_v2(values, axis, name: name); @@ -619,6 +634,16 @@ namespace Tensorflow }); } + public static Tensor[] split(Tensor value, int num_or_size_splits, Tensor axis, + string name = "split") + { + var size_splits = ops.convert_to_tensor(num_or_size_splits); + return gen_array_ops.split(axis: axis, + num_split: num_or_size_splits, + value: value, + name: name); + } + public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null) => gen_array_ops.slice(input, begin, size, name: name); diff --git a/src/TensorFlowNET.Core/Operations/clip_ops.cs b/src/TensorFlowNET.Core/Operations/clip_ops.cs new file mode 100644 index 00000000..701664f4 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/clip_ops.cs @@ -0,0 +1,45 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
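// Usage sketch for the static_bidirectional_rnn helper added above. The call shape follows the
// signature in this hunk; the BasicLstmCell constructor argument, the step count and the feature
// size are illustrative assumptions. Assumes `using System.Linq;`, `using Tensorflow.Operations;`
// and `using static Tensorflow.Binding;`.
var cell_fw = new BasicLstmCell(64);
var cell_bw = new BasicLstmCell(64);
// one tensor per time step, each of shape (batch, features)
var inputs = Enumerable.Range(0, 5)
    .Select(t => tf.placeholder(tf.float32, shape: new TensorShape(-1, 16), name: $"step_{t}"))
    .ToArray();
var (outputs, state_fw, state_bw) = rnn.static_bidirectional_rnn(
    cell_fw, cell_bw, inputs, dtype: tf.float32);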
+ See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public class clip_ops + { + public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) + { + return tf_with(ops.name_scope(name, "clip_by_value", new { t, clip_value_min, clip_value_max }), delegate + { + var values = ops.convert_to_tensor(t, name: "t"); + // Go through list of tensors, for each value in each tensor clip + var t_min = math_ops.minimum(values, clip_value_max); + // Assert that the shape is compatible with the initial shape, + // to prevent unintentional broadcasting. + _ = values.TensorShape.merge_with(t_min.shape); + var t_max = math_ops.maximum(t_min, clip_value_min, name: name); + _ = values.TensorShape.merge_with(t_max.shape); + + return t_max; + }); + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs index ffa0675b..2852c05c 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs @@ -625,7 +625,7 @@ namespace Tensorflow bool swap_memory = false, string name = null, Tensor maximum_iterations = null, - bool return_same_structure = false) + bool return_same_structure = false) where TItem : IFromMergeVars, new() { return tf_with(ops.name_scope(name, "while", loop_vars), scope => { diff --git a/src/TensorFlowNET.Core/Operations/ctc_ops.cs b/src/TensorFlowNET.Core/Operations/ctc_ops.cs new file mode 100644 index 00000000..07ed811d --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/ctc_ops.cs @@ -0,0 +1,67 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Linq; +using Tensorflow.Operations; +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public class ctc_ops + { + /// + /// Performs greedy decoding on the logits given in inputs. + /// + /// + /// 3-D, shape: (max_time x batch_size x num_classes), the logits. + /// + /// + /// A vector containing sequence lengths, size (batch_size). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CTCGreedyDecoder'. + /// + /// + /// If True, merge repeated classes in output. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// decoded_indices : Indices matrix, size (total_decoded_outputs x 2), + /// of a SparseTensor&lt;int64, 2&gt;. The rows store: [batch, time]. 
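// Usage sketch for clip_ops.clip_by_value defined above: clamp every element of `t` into
// [clip_value_min, clip_value_max]. Assumes `using static Tensorflow.Binding;` and NumSharp's np.
var t = tf.constant(np.array(-2f, 0.5f, 3f));
var clipped = clip_ops.clip_by_value(t, tf.constant(0f), tf.constant(1f));  // -> [0, 0.5, 1]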
+ /// decoded_values : Values vector, size: (total_decoded_outputs), + /// of a SparseTensor&lt;int64, 2&gt;. The vector stores the decoded classes. + /// decoded_shape : Shape vector, size (2), of the decoded SparseTensor. + /// Values are: [batch_size, max_decoded_length]. + /// log_probability : Matrix, size (batch_size x 1), containing sequence + /// log-probabilities. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// A note about the attribute merge_repeated: if enabled, when + /// consecutive logits' maximum indices are the same, only the first of + /// these is emitted. Labeling the blank '*', the sequence "A B B * B B" + /// becomes "A B B" if merge_repeated = True and "A B B B B" if + /// merge_repeated = False. + /// + /// Regardless of the value of merge_repeated, if the maximum index of a given + /// time and batch corresponds to the blank, index (num_classes - 1), no new + /// element is emitted. + /// + public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null) + => gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated: merge_repeated, name: name); + } +} diff --git a/src/TensorFlowNET.Core/Operations/functional_ops.cs b/src/TensorFlowNET.Core/Operations/functional_ops.cs new file mode 100644 index 00000000..5e7a7240 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/functional_ops.cs @@ -0,0 +1,238 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Linq; +using NumSharp; +using Tensorflow.Framework; +using Tensorflow.Util; +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public class functional_ops + { + public static Tensor scan( + Func fn, + Tensor elems, + Tensor initializer = null, + int parallel_iterations = 10, + bool back_prop = true, + bool swap_memory = false, + bool infer_shape = true, + bool reverse = false, + string name = null) + { + bool input_is_sequence = nest.is_sequence(elems); + + Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new [] {x}; + Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0]; + + bool output_is_sequence; + Func output_flatten; + Func output_pack; + if (initializer == null) + { + output_is_sequence = input_is_sequence; + output_flatten = input_flatten; + output_pack = input_pack; + } + else + { + output_is_sequence = nest.is_sequence(initializer); + output_flatten = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new [] {x}; + output_pack = (x) => output_is_sequence ? 
(Tensor)nest.pack_sequence_as(initializer, x) : x[0]; + } + + var elems_flat = input_flatten(elems); + + bool in_graph_mode = tf.context.executing_eagerly(); + + return tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope => + { + if (in_graph_mode) + { + // todo tf.net doesn't expose .caching_device + //// Any get_variable calls in fn will cache the first call locally + //// and not issue repeated network I/O requests for each iteration. + //var varscope = variable_scope.get_variable_scope(); + //bool varscope_caching_device_was_none = false; + //if (varscope.caching_device = null) + //{ + // // varscope.set_caching_device(lambda op: op.device) + // // varscope_caching_device_was_none = True + //} + } + + elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem")).ToArray(); + + var n = tensor_shape.dimension_value(elems_flat[0].shape[0]); + + // todo python had the below but dimension_value returns int which can't be null + //if (n == null) + //{ + // n = array_ops.shape(elems_flat[0])[0]; + //} + + var elems_ta = elems_flat.Select(elem => new TensorArray( + elem.dtype, + size: tf.constant(n), + dynamic_size: false, + element_shape: elem.shape.Skip(1).ToArray(), + infer_shape: true)).ToList(); + + for (int index = 0; index < elems_ta.Count; index++) + { + elems_ta[index].unstack(elems_flat[index]); + } + + Tensor[] a_flat; + int i; + if (initializer == null) + { + a_flat = elems_ta.Select(elem => elem.read(tf.constant(reverse ? n - 1 : 0))).ToArray(); + i = 1; + } + else + { + Tensor[] initializer_flat = output_flatten(initializer); + a_flat = initializer_flat.Select(init => ops.convert_to_tensor(init)).ToArray(); + i = 0; + } + + var accs_ta = a_flat.Select(init => new TensorArray( + dtype: init.dtype, + size: tf.constant(n), + element_shape: infer_shape ? init.shape : null, + dynamic_size: false, + infer_shape: infer_shape)).ToArray(); + + if (initializer == null) + { + for (int index = 0; index < accs_ta.Length; index++) + { + accs_ta[index].write(tf.constant(reverse ? n - 1 : 0), a_flat[index]); + } + } + + BodyItem compute(BodyItem item) + { + var packed_elems = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray()); + var packed_a = output_pack(item.A_Flat); + var a_out = fn(packed_a, packed_elems); + + var flat_a_out = output_flatten(a_out); + for (int j = 0; j < item.Accs_ta.Length; j++) + { + item.Accs_ta[j].write(item.I, flat_a_out[j]); + } + + var next_i = reverse ? 
item.I - 1 : item.I + 1; + return new BodyItem(next_i, flat_a_out, item.Accs_ta); + } + + int initial_i; + Func condition; + if (reverse) + { + initial_i = n - 1 - i; + condition = x => x.I >= 0; + } + else + { + initial_i = i; + condition = x => x.I < n; + } + + BodyItem bodyItem = + control_flow_ops.while_loop( + condition, + compute, + new BodyItem(tf.constant(initial_i), a_flat, accs_ta), + parallel_iterations: parallel_iterations, + back_prop: back_prop, + swap_memory: swap_memory, + maximum_iterations: tf.constant(n)); + + var results_flat = bodyItem.Accs_ta.Select(r => r.stack()).ToArray(); + + var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0])); + + foreach (var elem in elems_flat.Skip(1)) + { + n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0]))); + } + + foreach (Tensor r in results_flat) + { + r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray())); + } + + // todo get working when the above caching_device is fixed + //if (in_graph_mode && varscope_caching_device_was_none) { + // varscope.set_caching_device(None); + //} + + return output_pack(results_flat); + }); + } + + internal class BodyItem : ICanBeFlattened, IPackable, IFromMergeVars + { + public Tensor I { get; set; } + public Tensor[] A_Flat { get; set; } + public TensorArray[] Accs_ta { get; set; } + + public BodyItem() + { + } + + public BodyItem(Tensor i, Tensor[] a_flat, TensorArray[] accs_ta) + { + I = i; + A_Flat = a_flat; + Accs_ta = accs_ta; + } + + public object[] Flatten() + { + var elements = new List { I }; + elements.AddRange(A_Flat); + elements.AddRange(Accs_ta); + return elements.ToArray(); + } + + public BodyItem Pack(object[] sequences) + { + I = sequences[0] as Tensor; + A_Flat = new [] { sequences[1] as Tensor }; + Accs_ta = new [] { sequences[2] as TensorArray }; + + return new BodyItem(I, A_Flat, Accs_ta); + } + + public BodyItem FromMergeVars(ITensorOrTensorArray[] merge_vars) + { + I = (Tensor)merge_vars[1]; + A_Flat = new [] {(Tensor) merge_vars[2]}; + Accs_ta = new [] {(TensorArray) merge_vars[3]}; + return this; + } + } + } +} + diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 29910d04..d151d024 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -47,7 +47,7 @@ namespace Tensorflow /// /// /// - public static Tensor concat_v2(T[] values, int axis, string name = null) + public static Tensor concat_v2(T[] values, Ta axis, string name = null) { var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); diff --git a/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs new file mode 100644 index 00000000..018a56bb --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs @@ -0,0 +1,38 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
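// Usage sketch for functional_ops.scan above: a running sum over the leading dimension,
// mirroring tf.scan. Element-wise `+` on tensors, NumSharp's np and
// `using static Tensorflow.Binding;` are assumed to be available.
var elems = tf.constant(np.array(1f, 2f, 3f, 4f));
var cumsum = functional_ops.scan((acc, x) => acc + x, elems);  // -> [1, 3, 6, 10]
using (var sess = tf.Session())
{
    var result = sess.run(cumsum);
}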
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +namespace Tensorflow +{ + public class gen_ctc_ops + { + public static OpDefLibrary _op_def_lib = new OpDefLibrary(); + + public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder") + { + var op = _op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new + { + inputs, + sequence_length, + merge_repeated + }); + /*var decoded_indices = op.outputs[0]; + var decoded_values = op.outputs[1]; + var decoded_shape = op.outputs[2]; + var log_probability = op.outputs[3];*/ + return op.outputs; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs index 65b86f04..37ae486e 100644 --- a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs @@ -27,6 +27,19 @@ namespace Tensorflow return _op.output; } + public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions, + string name = null) + { + var _op = _op_def_lib._apply_op_helper("DynamicPartition", name, new + { + data, + partitions, + num_partitions + }); + + return _op.outputs; + } + public static (Tensor, Tensor) tensor_array_v3(T size, TF_DataType dtype = TF_DataType.DtInvalid, TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true, bool identical_element_shapes = false, string tensor_array_name = "", string name = null) diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs similarity index 100% rename from src/TensorFlowNET.Core/Operations/gen_image_ops.py.cs rename to src/TensorFlowNET.Core/Operations/gen_image_ops.cs diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.py.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs similarity index 95% rename from src/TensorFlowNET.Core/Operations/gen_io_ops.py.cs rename to src/TensorFlowNET.Core/Operations/gen_io_ops.cs index 2a921d93..13408452 100644 --- a/src/TensorFlowNET.Core/Operations/gen_io_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs @@ -34,7 +34,7 @@ namespace Tensorflow return _op.outputs; } - public static Tensor read_file(string filename, string name = null) + public static Tensor read_file(T filename, string name = null) { var _op = _op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename }); diff --git a/src/TensorFlowNET.Core/Operations/map_fn.cs b/src/TensorFlowNET.Core/Operations/map_fn.cs index 1206d5b9..89ea5dd4 100644 --- a/src/TensorFlowNET.Core/Operations/map_fn.cs +++ b/src/TensorFlowNET.Core/Operations/map_fn.cs @@ -2,7 +2,10 @@ using System.Collections.Generic; using System.Linq; using System.Text; +using NumSharp; +using Tensorflow.Framework; using Tensorflow.Operations; +using Tensorflow.Util; using static Tensorflow.Binding; namespace Tensorflow @@ -30,10 +33,40 @@ namespace Tensorflow bool infer_shape = true, string name = null) { - var elems_flat = new[] { elems }; - 
tf_with(ops.name_scope(name, "map", elems_flat), delegate + bool input_is_sequence = nest.is_sequence(elems); + Tensor[] input_flatten(Tensor x) => input_is_sequence ? nest.flatten(x).ToArray() : new [] {x}; + Tensor input_pack(Tensor[] x) => input_is_sequence ? (Tensor)nest.pack_sequence_as(elems, x) : x[0]; + + bool output_is_sequence; + Func output_flatten; + Func output_pack; + if (dtype == TF_DataType.DtInvalid) + { + output_is_sequence = input_is_sequence; + output_flatten = input_flatten; + output_pack = input_pack; + } + else + { + output_is_sequence = nest.is_sequence(dtype); + output_flatten = (x) => output_is_sequence ? nest.flatten(x).ToArray() : new [] {x}; + output_pack = (x) => output_is_sequence ? (Tensor)nest.pack_sequence_as(dtype, x) : x[0]; + } + + var elems_flat = input_flatten(elems); + return tf_with(ops.name_scope(name, "map", elems_flat), delegate { - var varscope = tf.get_variable_scope(); + //if in_graph_mode: + //# Any get_variable calls in fn will cache the first call locally + //# and not issue repeated network I/O requests for each iteration. + //varscope = vs.get_variable_scope() + //varscope_caching_device_was_none = False + //if varscope.caching_device is None: + // # TODO(ebrevdo): Change to using colocate_with here and in other + // # methods. + // varscope.set_caching_device(lambda op: op.device) + // varscope_caching_device_was_none = True + elems_flat = elems_flat.Select(elem => ops.convert_to_tensor(elem, name: "elem")) .ToArray(); @@ -65,22 +98,89 @@ namespace Tensorflow dynamic_size: false, infer_shape: infer_shape)).ToArray(); - /*Func compute = (i, tas) => + + BodyItem compute(BodyItem item) { - throw new NotImplementedException(""); - }; + var packed_values = input_pack(elems_ta.Select(elem_ta => elem_ta.read(item.I)).ToArray()); + var packed_fn_values = fn(packed_values); + //nest.assert_same_structure(dtype or elems, packed_fn_values) + + var flat_fn_values = output_flatten(packed_fn_values); + for (int j = 0; j < item.Accs_ta.Length; j++) + { + item.Accs_ta[j].write(item.I, flat_fn_values[j]); + } + + return new BodyItem(item.I + 1, item.Accs_ta); + } var r_a = control_flow_ops.while_loop( - (i, _) => i < n, + (x) => x.I < n, compute, - new[] { i, accs_ta }, + new BodyItem(i, accs_ta), parallel_iterations: parallel_iterations, back_prop: back_prop, swap_memory: swap_memory, - maximum_iterations: n);*/ + maximum_iterations: tf.constant(n)); + var results_flat = r_a.Accs_ta.Select(r => r.stack()).ToArray(); + + var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0])); + + foreach (var elem in elems_flat.Skip(1)) + { + n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0]))); + } + + foreach (Tensor r in results_flat) + { + r.set_shape(new TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray())); + } + + // todo get working when the above caching_device is fixed + //if (in_graph_mode && varscope_caching_device_was_none) { + // varscope.set_caching_device(None); + //} + + return output_pack(results_flat); }); + } + + internal class BodyItem : ICanBeFlattened, IPackable, IFromMergeVars + { + public Tensor I { get; set; } + public TensorArray[] Accs_ta { get; set; } - throw new NotImplementedException(""); + public BodyItem() + { + } + + public BodyItem(Tensor i, TensorArray[] accs_ta) + { + I = i; + Accs_ta = accs_ta; + } + + public object[] Flatten() + { + var elements = new List { I }; + elements.AddRange(Accs_ta); + return 
elements.ToArray(); + } + + public BodyItem Pack(object[] sequences) + { + I = sequences[0] as Tensor; + Accs_ta = new [] { sequences[1] as TensorArray }; + + return new BodyItem(I, Accs_ta); + } + + public BodyItem FromMergeVars(ITensorOrTensorArray[] merge_vars) + { + I = (Tensor)merge_vars[1]; + Accs_ta = new [] {(TensorArray) merge_vars[2]}; + return this; + } } } } diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index 848a89cd..bb8d7134 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -219,10 +219,19 @@ namespace Tensorflow } } - public static Tensor reduce_mean(Tensor[] input_tensors, int axis, bool keepdims = false, string name = null) + public static Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null) { - var m = gen_math_ops.mean(input_tensors, axis, keepdims, name); - return _may_reduce_to_scalar(keepdims, axis, m); + if(axis == null) + { + var r = _ReductionDims(input_tensors, axis); + var m = gen_math_ops.mean(input_tensors, r, keepdims, name); + return _may_reduce_to_scalar(keepdims, axis, m); + } + else + { + var m = gen_math_ops.mean(input_tensors, axis, keepdims, name); + return _may_reduce_to_scalar(keepdims, axis, m); + } } /// @@ -492,7 +501,7 @@ namespace Tensorflow return output; } - private static Tensor _may_reduce_to_scalar(bool keepdims, int axis, Tensor output) + private static Tensor _may_reduce_to_scalar(bool keepdims, int? axis, Tensor output) { return output; } @@ -515,6 +524,11 @@ namespace Tensorflow return axis; } + private static Tensor _ReductionDims(Tensor[] x, int? axis = null, string name = null) + { + return range(0, array_ops.rank(x)); + } + private static Tensor _ReductionDims(Tensor x, int[] axis) { if (axis != null) diff --git a/src/TensorFlowNET.Core/Operations/random_ops.cs b/src/TensorFlowNET.Core/Operations/random_ops.cs index bd718768..c722c9c0 100644 --- a/src/TensorFlowNET.Core/Operations/random_ops.cs +++ b/src/TensorFlowNET.Core/Operations/random_ops.cs @@ -80,7 +80,7 @@ namespace Tensorflow } public static Tensor random_uniform(Tensor shape, - long minval = 0, + int minval = 0, Tensor maxval = null, TF_DataType dtype = TF_DataType.TF_FLOAT, int? 
seed = null, diff --git a/src/TensorFlowNET.Core/Sessions/BaseSession.cs b/src/TensorFlowNET.Core/Sessions/BaseSession.cs index bb37956c..b25e77e5 100644 --- a/src/TensorFlowNET.Core/Sessions/BaseSession.cs +++ b/src/TensorFlowNET.Core/Sessions/BaseSession.cs @@ -68,6 +68,14 @@ namespace Tensorflow return _run(fetche, feed_dict)[0]; } + public virtual (NDArray, NDArray, NDArray, NDArray, NDArray) run( + (ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation) fetches, + params FeedItem[] feed_dict) + { + var results = _run(new object[] { fetches.Item1, fetches.Item2, fetches.Item3, fetches.Item4, fetches.Item5 }, feed_dict); + return (results[0], results[1], results[2], results[3], results[4]); + } + public virtual (NDArray, NDArray, NDArray, NDArray) run((ITensorOrOperation, ITensorOrOperation, ITensorOrOperation, ITensorOrOperation) fetches, params FeedItem[] feed_dict) { var results = _run(new object[] {fetches.Item1, fetches.Item2, fetches.Item3, fetches.Item4}, feed_dict); @@ -271,7 +279,7 @@ namespace Tensorflow break; case TF_DataType.TF_STRING: using (var reader = new CodedInputStream(new IntPtr(srcAddress).Stream(8, (long) tensor.bytesize))) - ret = NDArray.FromString(reader.ReadString()); + ret = new NDArray(reader.ReadBytes().ToByteArray()); break; case TF_DataType.TF_UINT8: ret = NDArray.Scalar(*(byte*) srcAddress); @@ -459,4 +467,4 @@ namespace Tensorflow } } } -} \ No newline at end of file +} diff --git a/src/TensorFlowNET.Core/Sessions/Session.cs b/src/TensorFlowNET.Core/Sessions/Session.cs index caa669d3..c60a49c1 100644 --- a/src/TensorFlowNET.Core/Sessions/Session.cs +++ b/src/TensorFlowNET.Core/Sessions/Session.cs @@ -37,8 +37,7 @@ namespace Tensorflow public Session as_default() { - tf._defaultSessionFactory.Value = this; - return this; + return ops.set_default_session(this); } [MethodImpl(MethodImplOptions.NoOptimization)] diff --git a/src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs b/src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs index 48eddf3b..12422289 100644 --- a/src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs +++ b/src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs @@ -58,51 +58,18 @@ namespace Tensorflow case NDArray value: result = new[] { value }; break; -#if _REGEN - %types=["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"] - %foreach types% - case #1 value: - result = new[] { NDArray.Scalar(value) }; - break; - % -#else - case sbyte value: - result = new[] { NDArray.Scalar(value) }; - break; case bool value: result = new[] { NDArray.Scalar(value) }; break; case byte value: result = new[] { NDArray.Scalar(value) }; break; - case short value: - result = new[] { NDArray.Scalar(value) }; - break; - case ushort value: - result = new[] { NDArray.Scalar(value) }; - break; case int value: result = new[] { NDArray.Scalar(value) }; break; - case uint value: - result = new[] { NDArray.Scalar(value) }; - break; - case long value: - result = new[] { NDArray.Scalar(value) }; - break; - case ulong value: - result = new[] { NDArray.Scalar(value) }; - break; case float value: result = new[] { NDArray.Scalar(value) }; break; - case double value: - result = new[] { NDArray.Scalar(value) }; - break; - case Complex value: - result = new[] { NDArray.Scalar(value) }; - break; -#endif default: break; } diff --git a/src/TensorFlowNET.Core/Sessions/_FetchHandler.cs b/src/TensorFlowNET.Core/Sessions/_FetchHandler.cs index b7434089..79692798 100644 --- 
a/src/TensorFlowNET.Core/Sessions/_FetchHandler.cs +++ b/src/TensorFlowNET.Core/Sessions/_FetchHandler.cs @@ -86,6 +86,9 @@ namespace Tensorflow case NPTypeCode.Char: full_values.Add(float.NaN); break; + case NPTypeCode.Byte: + full_values.Add(float.NaN); + break; default: throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype.Name}"); } diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs index 21ff6f6e..928f39f2 100644 --- a/src/TensorFlowNET.Core/Status/Status.cs +++ b/src/TensorFlowNET.Core/Status/Status.cs @@ -65,9 +65,7 @@ namespace Tensorflow } public static implicit operator IntPtr(Status status) - { - return status._handle; - } + => status._handle; protected override void DisposeUnmanagedResources(IntPtr handle) => TF_DeleteStatus(handle); diff --git a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj index 39279808..22006661 100644 --- a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj +++ b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj @@ -1,11 +1,11 @@  - net472;netstandard2.0 + netstandard2.0 TensorFlow.NET Tensorflow 1.14.1 - 0.12.1 + 0.14.2.0 Haiping Chen, Meinrad Recheis, Eli Belash SciSharp STACK true @@ -18,14 +18,13 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.12.1.0 - Changes since v0.11.0: -1: Add ICanBeFlattened for nest.flatten2. -2: Complete the WhileContext. -3: Add tf.nn.rnn_cell.BasicRNNCell and tf.nn.dynamic_rnn. -4: Add EstimatorSpec. + 0.14.2.0 + Changes since v0.14.0: +1: Add TransformGraphWithStringInputs. +2: tf.trainer.load_graph, tf.trainer.freeze_graph +3: Import Protobuf.Text 7.3 - 0.12.1.0 + 0.14.2.0 LICENSE true true @@ -62,8 +61,9 @@ https://tensorflownet.readthedocs.io - - + + + diff --git a/src/TensorFlowNET.Core/Tensors/Dimension.cs b/src/TensorFlowNET.Core/Tensors/Dimension.cs index 58520270..878ba5ae 100644 --- a/src/TensorFlowNET.Core/Tensors/Dimension.cs +++ b/src/TensorFlowNET.Core/Tensors/Dimension.cs @@ -22,6 +22,12 @@ namespace Tensorflow return new Dimension(_value); } + public static implicit operator Dimension(int value) + => new Dimension(value); + + public static implicit operator int(Dimension dimension) + => dimension.value; + public override string ToString() => $"Dimension({_value})"; } } diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs new file mode 100644 index 00000000..b9b71154 --- /dev/null +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs @@ -0,0 +1,411 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
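// Usage sketch for the new five-value Session.run overload added to BaseSession above: fetch five
// tensors in a single call and receive them as a tuple of NDArrays.
var t1 = tf.constant(1); var t2 = tf.constant(2); var t3 = tf.constant(3);
var t4 = tf.constant(4); var t5 = tf.constant(5);
using (var sess = tf.Session())
{
    var (r1, r2, r3, r4, r5) = sess.run((t1, t2, t3, t4, t5));
}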
+******************************************************************************/ + +using NumSharp; +using System; +using System.Diagnostics.CodeAnalysis; +using System.Globalization; +using System.Linq; +using System.Numerics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +using NumSharp.Backends; +using NumSharp.Backends.Unmanaged; +using NumSharp.Utilities; +using static Tensorflow.c_api; + +#if SERIALIZABLE +using Newtonsoft.Json; +#endif + +namespace Tensorflow +{ + [SuppressMessage("ReSharper", "InvokeAsExtensionMethod")] + public partial class Tensor + { + public T ToScalar() + { + unsafe + { + if (typeof(T).as_dtype() == this.dtype && this.dtype != TF_DataType.TF_STRING) + return Unsafe.Read(this.buffer.ToPointer()); + + switch (this.dtype) + { +#if _REGEN + %foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase% + case TF_DataType.#1: + return Converts.ChangeType(*(#3*) this.buffer); + % +#else + + case TF_DataType.TF_UINT8: + return Converts.ChangeType(*(byte*) this.buffer); + case TF_DataType.TF_INT16: + return Converts.ChangeType(*(short*) this.buffer); + case TF_DataType.TF_UINT16: + return Converts.ChangeType(*(ushort*) this.buffer); + case TF_DataType.TF_INT32: + return Converts.ChangeType(*(int*) this.buffer); + case TF_DataType.TF_UINT32: + return Converts.ChangeType(*(uint*) this.buffer); + case TF_DataType.TF_INT64: + return Converts.ChangeType(*(long*) this.buffer); + case TF_DataType.TF_UINT64: + return Converts.ChangeType(*(ulong*) this.buffer); + case TF_DataType.TF_DOUBLE: + return Converts.ChangeType(*(double*) this.buffer); + case TF_DataType.TF_FLOAT: + return Converts.ChangeType(*(float*) this.buffer); +#endif + case TF_DataType.TF_STRING: + if (this.NDims != 0) + throw new ArgumentException($"{nameof(Tensor)} can only be scalar."); + + IntPtr stringStartAddress = IntPtr.Zero; + UIntPtr dstLen = UIntPtr.Zero; + + using (var status = new Status()) + { + c_api.TF_StringDecode((byte*) this.buffer + 8, (UIntPtr) (this.bytesize), (byte**) &stringStartAddress, &dstLen, status); + status.Check(true); + } + + var dstLenInt = checked((int) dstLen); + var value = Encoding.UTF8.GetString((byte*) stringStartAddress, dstLenInt); + if (typeof(T) == typeof(string)) + return (T) (object) value; + else + return Converts.ChangeType(value); + + case TF_DataType.TF_COMPLEX64: + case TF_DataType.TF_COMPLEX128: + default: + throw new NotSupportedException(); + } + } + } + + public unsafe void CopyTo(NDArray nd) + { + if (!nd.Shape.IsContiguous) + throw new ArgumentException("NDArray has to be contiguous (ndarray.Shape.IsContiguous)."); + +#if _REGEN + #region Compute + switch (nd.typecode) + { + %foreach supported_dtypes,supported_dtypes_lowercase% + case NPTypeCode.#1: + { + CopyTo<#2>(new Span<#2>(nd.Unsafe.Address, nd.size*nd.dtypesize)); + break; + } + % + default: + throw new NotSupportedException(); + } + #endregion +#else + + #region Compute + + switch (nd.typecode) + { + case NPTypeCode.Boolean: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Byte: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Int16: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.UInt16: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Int32: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } 
+ case NPTypeCode.UInt32: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Int64: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.UInt64: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Char: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Double: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + case NPTypeCode.Single: + { + CopyTo(new Span(nd.Unsafe.Address, nd.size * nd.dtypesize)); + break; + } + default: + throw new NotSupportedException(); + } + + #endregion +#endif + } + + public void CopyTo(Span destination) where T : unmanaged + { + unsafe + { + var len = checked((int) this.size); + //perform regular CopyTo using Span.CopyTo. + if (typeof(T).as_dtype() == this.dtype && this.dtype != TF_DataType.TF_STRING) //T can't be a string but tensor can. + { + var src = (T*) this.buffer; + var srcSpan = new Span(src, len); + srcSpan.CopyTo(destination); + + return; + } + + if (len > destination.Length) + throw new ArgumentException("Destinion was too short to perform CopyTo."); + + //Perform cast to type . + fixed (T* dst = destination) + { + switch (this.dtype) + { +#if _REGEN + %foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase% + case TF_DataType.#1: + { + var converter = Converts.FindConverter<#3, T>(); + var src = (#3*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + % +#else + case TF_DataType.TF_BOOL: + { + var converter = Converts.FindConverter(); + var src = (bool*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_UINT8: + { + var converter = Converts.FindConverter(); + var src = (byte*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_INT16: + { + var converter = Converts.FindConverter(); + var src = (short*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_UINT16: + { + var converter = Converts.FindConverter(); + var src = (ushort*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_INT32: + { + var converter = Converts.FindConverter(); + var src = (int*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_UINT32: + { + var converter = Converts.FindConverter(); + var src = (uint*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_INT64: + { + var converter = Converts.FindConverter(); + var src = (long*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_UINT64: + { + var converter = Converts.FindConverter(); + var src = (ulong*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_DOUBLE: + { + var converter = Converts.FindConverter(); + var src = (double*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } + case TF_DataType.TF_FLOAT: + { + var converter = 
Converts.FindConverter(); + var src = (float*) this.buffer; + for (var i = 0; i < len; i++) + *(dst + i) = converter(unchecked(*(src + i))); + return; + } +#endif + case TF_DataType.TF_STRING: + { + var src = this.StringData(); + var culture = CultureInfo.InvariantCulture; + + //pin to prevent GC from moving the span around. + fixed (T* _ = destination) + switch (typeof(T).as_dtype()) + { +#if _REGEN + %foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase% + case TF_DataType.#1: { + var sdst = (#3*)Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible)src[i]).To#2(culture); + return; + } + % +#else + case TF_DataType.TF_BOOL: + { + var sdst = (bool*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToBoolean(culture); + return; + } + case TF_DataType.TF_UINT8: + { + var sdst = (byte*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToByte(culture); + return; + } + case TF_DataType.TF_INT16: + { + var sdst = (short*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToInt16(culture); + return; + } + case TF_DataType.TF_UINT16: + { + var sdst = (ushort*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToUInt16(culture); + return; + } + case TF_DataType.TF_INT32: + { + var sdst = (int*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToInt32(culture); + return; + } + case TF_DataType.TF_UINT32: + { + var sdst = (uint*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToUInt32(culture); + return; + } + case TF_DataType.TF_INT64: + { + var sdst = (long*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToInt64(culture); + return; + } + case TF_DataType.TF_UINT64: + { + var sdst = (ulong*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToUInt64(culture); + return; + } + case TF_DataType.TF_DOUBLE: + { + var sdst = (double*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToDouble(culture); + return; + } + case TF_DataType.TF_FLOAT: + { + var sdst = (float*) Unsafe.AsPointer(ref destination.GetPinnableReference()); + for (var i = 0; i < len; i++) + *(sdst + i) = ((IConvertible) src[i]).ToSingle(culture); + return; + } +#endif + default: + throw new NotSupportedException(); + } + } + case TF_DataType.TF_COMPLEX64: + case TF_DataType.TF_COMPLEX128: + default: + throw new NotSupportedException(); + } + } + } + } + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs index 846db42d..b5fdde48 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs @@ -7,20 +7,6 @@ namespace Tensorflow { public partial class Tensor { - /// - /// Issue unresolved, will cause name_scope problem. 
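// Usage sketch for the Tensor.Conversions helpers above. ToScalar<T> reads the tensor's buffer
// with a numeric cast; CopyTo<T> copies the data into a caller-provided Span. Constructing a
// Tensor directly from an NDArray is assumed here.
var t = new Tensor(NDArray.Scalar(3.14f));
double d = t.ToScalar<double>();        // 3.14f widened to double
var dest = new float[1];
t.CopyTo(new Span<float>(dest));        // dest[0] == 3.14f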
- /// - /// - /*public static implicit operator Tensor(double scalar) - { - return constant_op.constant(scalar); - }*/ - - /*public static implicit operator Tensor(int scalar) - { - return constant_op.constant(scalar); - }*/ - public static implicit operator IntPtr(Tensor tensor) { if (tensor._handle == IntPtr.Zero) diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index 67474eb9..efac802d 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -162,9 +162,9 @@ namespace Tensorflow using (var status = new Status()) { if (value == null) - c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); + c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), null, -1, status); else - c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, status); + c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, status); status.Check(true); } @@ -257,7 +257,6 @@ namespace Tensorflow /// /// /// - /// When is string public T[] ToArray() where T : unmanaged { //Are the types matching? diff --git a/src/TensorFlowNET.Core/Tensors/TensorShape.cs b/src/TensorFlowNET.Core/Tensors/TensorShape.cs index b3099799..5b521ef4 100644 --- a/src/TensorFlowNET.Core/Tensors/TensorShape.cs +++ b/src/TensorFlowNET.Core/Tensors/TensorShape.cs @@ -154,7 +154,7 @@ namespace Tensorflow [SuppressMessage("ReSharper", "ParameterHidesMember")] public TensorShape with_rank_at_least(int rank) { - if (rank != ndim) + if (ndim < rank) throw new ValueError($"Shape {this} must have rank at least {rank}"); else return this; diff --git a/src/TensorFlowNET.Core/Tensors/dtypes.cs b/src/TensorFlowNET.Core/Tensors/dtypes.cs index 59de20ac..a54d0448 100644 --- a/src/TensorFlowNET.Core/Tensors/dtypes.cs +++ b/src/TensorFlowNET.Core/Tensors/dtypes.cs @@ -138,7 +138,7 @@ namespace Tensorflow dtype = TF_DataType.TF_INT8; break; case "Byte": - dtype = TF_DataType.TF_UINT8; + dtype = dtype ?? TF_DataType.TF_UINT8; break; case "Int16": dtype = TF_DataType.TF_INT16; diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index 0989db4f..504ef024 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -18,6 +18,7 @@ using NumSharp; using System; using System.Linq; using NumSharp.Utilities; +using System.Text; namespace Tensorflow { @@ -256,7 +257,7 @@ namespace Tensorflow nd = np.array(doubleVals); break; case string strVal: - nd = NDArray.FromString(strVal); + nd = new NDArray(Encoding.ASCII.GetBytes(strVal)); break; case string[] strVals: nd = strVals; diff --git a/src/TensorFlowNET.Core/Training/Saving/Saver.cs b/src/TensorFlowNET.Core/Training/Saving/Saver.cs index 3f72a438..9e641a43 100644 --- a/src/TensorFlowNET.Core/Training/Saving/Saver.cs +++ b/src/TensorFlowNET.Core/Training/Saving/Saver.cs @@ -14,10 +14,12 @@ limitations under the License. 
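// Quick check of the corrected TensorShape.with_rank_at_least above: any shape whose rank is at
// least the requested minimum now passes, instead of requiring an exact rank match.
var shape = new TensorShape(32, 10);
var ok = shape.with_rank_at_least(1);   // rank 2 >= 1, returns the same shape
// shape.with_rank_at_least(3) would still throw a ValueError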
******************************************************************************/ +using NumSharp; using System; using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; using static Tensorflow.Binding; namespace Tensorflow @@ -170,7 +172,7 @@ namespace Tensorflow { if (string.IsNullOrEmpty(latest_filename)) latest_filename = "checkpoint"; - object model_checkpoint_path = ""; + NDArray[] model_checkpoint_path = null; string checkpoint_file = ""; if (global_step > 0) @@ -183,15 +185,15 @@ namespace Tensorflow if (!_is_empty) { model_checkpoint_path = sess.run(_saver_def.SaveTensorName, - new FeedItem(_saver_def.FilenameTensorName, checkpoint_file) - ); + (_saver_def.FilenameTensorName, checkpoint_file)); if (write_state) { - _RecordLastCheckpoint(model_checkpoint_path.ToString()); + var path = UTF8Encoding.UTF8.GetString((byte[])model_checkpoint_path[0]); + _RecordLastCheckpoint(path); checkpoint_management.update_checkpoint_state_internal( save_dir: save_path_parent, - model_checkpoint_path: model_checkpoint_path.ToString(), + model_checkpoint_path: path, all_model_checkpoint_paths: _last_checkpoints.Keys.Select(x => x).ToList(), latest_filename: latest_filename, save_relative_paths: _save_relative_paths); @@ -205,7 +207,7 @@ namespace Tensorflow export_meta_graph(meta_graph_filename, strip_default_attrs: strip_default_attrs, save_debug_info: save_debug_info); } - return _is_empty ? string.Empty : model_checkpoint_path.ToString(); + return _is_empty ? string.Empty : UTF8Encoding.UTF8.GetString((byte[])model_checkpoint_path[0]); } public (Saver, object) import_meta_graph(string meta_graph_or_file, diff --git a/src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs b/src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs index 47f64b91..242464bd 100644 --- a/src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs +++ b/src/TensorFlowNET.Core/Training/Saving/checkpoint_management.py.cs @@ -20,6 +20,7 @@ using System.IO; using System.Linq; using static Tensorflow.SaverDef.Types; using static Tensorflow.Binding; +using Protobuf.Text; namespace Tensorflow { @@ -44,8 +45,7 @@ namespace Tensorflow float? last_preserved_timestamp = null ) { - CheckpointState ckpt = null; - + CheckpointState ckpt = null; // Writes the "checkpoint" file for the coordinator for later restoration. string coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename); if (save_relative_paths) @@ -65,7 +65,14 @@ namespace Tensorflow throw new RuntimeError($"Save path '{model_checkpoint_path}' conflicts with path used for " + "checkpoint state. Please use a different save path."); - File.WriteAllText(coord_checkpoint_filename, ckpt.ToString()); + // File.WriteAllText(coord_checkpoint_filename, ckpt.ToString()); + var checkpoints = new List + { + $"model_checkpoint_path: \"{ckpt.ModelCheckpointPath}\"" + }; + checkpoints.AddRange(all_model_checkpoint_paths.Select(x => $"all_model_checkpoint_paths: \"{x}\"")); + + File.WriteAllLines(coord_checkpoint_filename, checkpoints); } /// @@ -98,7 +105,14 @@ namespace Tensorflow all_model_checkpoint_paths.Add(model_checkpoint_path); // Relative paths need to be rewritten to be relative to the "save_dir" - // if model_checkpoint_path already contains "save_dir". 
+ if (model_checkpoint_path.StartsWith(save_dir)) + { + model_checkpoint_path = model_checkpoint_path.Substring(save_dir.Length + 1); + all_model_checkpoint_paths = all_model_checkpoint_paths + .Select(x => x.Substring(save_dir.Length + 1)) + .ToList(); + } + var coord_checkpoint_proto = new CheckpointState() { @@ -174,24 +188,9 @@ namespace Tensorflow var coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir, latest_filename); if (File.Exists(coord_checkpoint_filename)) { - var file_content = File.ReadAllLines(coord_checkpoint_filename); + var file_content = File.ReadAllText(coord_checkpoint_filename); // https://github.com/protocolbuffers/protobuf/issues/6654 - // var ckpt = CheckpointState.Parser.ParseFrom(file_content); - var ckpt = new CheckpointState(); - var field = CheckpointState.Descriptor.FindFieldByName("model_checkpoint_path"); - ckpt.ModelCheckpointPath = file_content.FirstOrDefault(x => x.StartsWith(field.Name + ":")).Substring(field.Name.Length + 2); - // remove first and last quote. - ckpt.ModelCheckpointPath = ckpt.ModelCheckpointPath.Substring(1, ckpt.ModelCheckpointPath.Length - 2); - - field = CheckpointState.Descriptor.FindFieldByName("all_model_checkpoint_paths"); - file_content.Where(x => x.StartsWith(field.Name + ":")) - .ToList() - .ForEach(x => - { - string value = x.Substring(field.Name.Length + 2); - ckpt.AllModelCheckpointPaths.Add(value.Substring(1, value.Length - 2)); - }); - + var ckpt = CheckpointState.Parser.ParseText(file_content); if (string.IsNullOrEmpty(ckpt.ModelCheckpointPath)) throw new ValueError($"Invalid checkpoint state loaded from {checkpoint_dir}"); // For relative model_checkpoint_path and all_model_checkpoint_paths, diff --git a/src/TensorFlowNET.Core/Training/Saving/saver.py.cs b/src/TensorFlowNET.Core/Training/Saving/saver.py.cs index 49c9bfc5..5f119791 100644 --- a/src/TensorFlowNET.Core/Training/Saving/saver.py.cs +++ b/src/TensorFlowNET.Core/Training/Saving/saver.py.cs @@ -14,9 +14,12 @@ limitations under the License. 
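// Usage sketch for the updated Saver.save above, which now decodes the returned checkpoint path
// from the string tensor produced by the save op. The variable is only there to give the saver
// something to write.
var w = tf.Variable(0.5f, name: "w");
var saver = tf.train.Saver();
using (var sess = tf.Session())
{
    sess.run(tf.global_variables_initializer());
    var ckpt_path = saver.save(sess, "checkpoints/model.ckpt");
}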
******************************************************************************/ +using Google.Protobuf; using System; using System.Collections.Generic; +using System.IO; using System.Linq; +using static Tensorflow.Binding; namespace Tensorflow { @@ -29,14 +32,12 @@ namespace Tensorflow { var meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file); - var meta = meta_graph.import_scoped_meta_graph_with_return_elements( + var (imported_vars, imported_return_elements) = meta_graph.import_scoped_meta_graph_with_return_elements( meta_graph_def, clear_devices: clear_devices, import_scope: import_scope, return_elements: return_elements); - var (imported_vars, imported_return_elements) = meta; - var saver = _create_saver_from_imported_meta_graph( meta_graph_def, import_scope, imported_vars); @@ -83,5 +84,37 @@ namespace Tensorflow } } } + + public static string freeze_graph(string checkpoint_dir, + string output_pb_name, + string[] output_node_names) + { + var checkpoint = checkpoint_management.latest_checkpoint(checkpoint_dir); + if (!File.Exists($"{checkpoint}.meta")) return null; + + string output_pb = Path.GetFullPath(Path.Combine(checkpoint_dir, "../", $"{output_pb_name}.pb")); + + using (var graph = tf.Graph()) + using (var sess = tf.Session(graph)) + { + var saver = tf.train.import_meta_graph($"{checkpoint}.meta", clear_devices: true); + saver.restore(sess, checkpoint); + var output_graph_def = tf.graph_util.convert_variables_to_constants(sess, + graph.as_graph_def(), + output_node_names); + Console.WriteLine($"Froze {output_graph_def.Node.Count} nodes."); + File.WriteAllBytes(output_pb, output_graph_def.ToByteArray()); + return output_pb; + } + } + + public static Graph load_graph(string freeze_graph_pb, string name = "") + { + var bytes = File.ReadAllBytes(freeze_graph_pb); + var graph = tf.Graph().as_default(); + importer.import_graph_def(GraphDef.Parser.ParseFrom(bytes), + name: name); + return graph; + } } } diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs index 7dbacea0..54149fe1 100644 --- a/src/TensorFlowNET.Core/Util/nest.py.cs +++ b/src/TensorFlowNET.Core/Util/nest.py.cs @@ -526,14 +526,6 @@ namespace Tensorflow.Util return pack_sequence_as(structure, mapped_flat_structure) as Tensor; } - public static Tensor map_structure2(Func func, T structure) - { - var flat_structure = flatten(structure); - var mapped_flat_structure = flat_structure.Select(func).ToList(); - - return pack_sequence_as(structure, mapped_flat_structure) as Tensor; - } - /// /// Same as map_structure, but with only one structure (no combining of multiple structures) /// diff --git a/src/TensorFlowNET.Core/Variables/VariableScope.cs b/src/TensorFlowNET.Core/Variables/VariableScope.cs index 52766e4f..68c75ca3 100644 --- a/src/TensorFlowNET.Core/Variables/VariableScope.cs +++ b/src/TensorFlowNET.Core/Variables/VariableScope.cs @@ -74,5 +74,10 @@ namespace Tensorflow aggregation: aggregation) as RefVariable; }); } + + public void reuse_variables() + { + _reuse = _ReuseMode.AUTO_REUSE; + } } } diff --git a/src/TensorFlowNET.Core/Variables/_ReuseMode.cs b/src/TensorFlowNET.Core/Variables/_ReuseMode.cs index e63e51f7..9344e824 100644 --- a/src/TensorFlowNET.Core/Variables/_ReuseMode.cs +++ b/src/TensorFlowNET.Core/Variables/_ReuseMode.cs @@ -5,6 +5,7 @@ /// public enum _ReuseMode { + NOT_REUSE = 0, // Indicates that variables are to be fetched if they already exist or // otherwise created. 
AUTO_REUSE = 1 diff --git a/src/TensorFlowNET.Core/Variables/variables.py.cs b/src/TensorFlowNET.Core/Variables/variables.py.cs index 818b324e..a9f91ff2 100644 --- a/src/TensorFlowNET.Core/Variables/variables.py.cs +++ b/src/TensorFlowNET.Core/Variables/variables.py.cs @@ -61,7 +61,6 @@ namespace Tensorflow public static List global_variables(string scope = null) { return ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope); - } /// diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index 02417594..633a9bf7 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -28,10 +28,6 @@ namespace Tensorflow { public partial class ops { - private static readonly ThreadLocal _defaultGraphFactory = new ThreadLocal(() => new DefaultGraphStack()); - - public static DefaultGraphStack default_graph_stack => _defaultGraphFactory.Value; - public static int tensor_id(Tensor tensor) { return tensor.Id; @@ -78,53 +74,6 @@ namespace Tensorflow return get_default_graph().get_collection_ref(key); } - /// - /// Returns the default graph for the current thread. - /// - /// The returned graph will be the innermost graph on which a - /// `Graph.as_default()` context has been entered, or a global default - /// graph if none has been explicitly created. - /// - /// NOTE: The default graph is a property of the current thread.If you - /// create a new thread, and wish to use the default graph in that - /// thread, you must explicitly add a `with g.as_default():` in that - /// thread's function. - /// - /// - public static Graph get_default_graph() - { - //TODO: original source indicates there should be a _default_graph_stack! - //return _default_graph_stack.get_default() - return default_graph_stack.get_controller(); - } - - public static Graph set_default_graph(Graph graph) - { - //TODO: original source does not have a 'set_default_graph' and indicates there should be a _default_graph_stack! - default_graph_stack.set_controller(graph); - return default_graph_stack.get_controller(); - } - - /// - /// Clears the default graph stack and resets the global default graph. - /// - /// NOTE: The default graph is a property of the current thread.This - /// function applies only to the current thread.Calling this function while - /// a `tf.Session` or `tf.InteractiveSession` is active will result in undefined - /// behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects - /// after calling this function will result in undefined behavior. - /// - /// - public static void reset_default_graph() - { - //TODO: original source indicates there should be a _default_graph_stack! - //if (!_default_graph_stack.is_cleared()) - // throw new InvalidOperationException("Do not use tf.reset_default_graph() to clear " + - // "nested graphs. If you need a cleared graph, " + - // "exit the nesting and create a new graph."); - default_graph_stack.reset(); - } - public static Graph _get_graph_from_inputs(params Tensor[] op_input_list) => _get_graph_from_inputs(op_input_list: op_input_list, graph: null); @@ -399,15 +348,6 @@ namespace Tensorflow return session.run(tensor, feed_dict); } - /// - /// Returns the default session for the current thread. - /// - /// The default `Session` being used in the current thread. - public static Session get_default_session() - { - return tf.defaultSession; - } - /// /// Prepends name scope to a name. 
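// Usage sketch for the freeze_graph/load_graph helpers added to saver.py.cs above. The static
// `saver` class name and the "output" node name are assumptions; the release notes above list
// the same pair as tf.trainer.freeze_graph / tf.trainer.load_graph.
var pb = saver.freeze_graph("checkpoints", "frozen_model", new[] { "output" });
if (pb != null)
{
    var frozen = saver.load_graph(pb);
}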
/// diff --git a/src/TensorFlowNET.Core/ops.threading.cs b/src/TensorFlowNET.Core/ops.threading.cs new file mode 100644 index 00000000..f8796596 --- /dev/null +++ b/src/TensorFlowNET.Core/ops.threading.cs @@ -0,0 +1,152 @@ +using System.Threading; +using Tensorflow.Util; +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public partial class ops + { + private static readonly ThreadLocal _defaultGraphFactory = new ThreadLocal(() => new DefaultGraphStack()); + private static volatile Session _singleSesson; + private static volatile DefaultGraphStack _singleGraphStack; + private static readonly object _threadingLock = new object(); + + public static DefaultGraphStack default_graph_stack + { + get + { + if (!isSingleThreaded) + return _defaultGraphFactory.Value; + + if (_singleGraphStack == null) + { + lock (_threadingLock) + { + if (_singleGraphStack == null) + _singleGraphStack = new DefaultGraphStack(); + } + } + + return _singleGraphStack; + } + } + + private static bool isSingleThreaded = false; + + /// + /// Does this library ignore different thread accessing. + /// + /// https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading + public static bool IsSingleThreaded + { + get => isSingleThreaded; + set + { + if (value) + enforce_singlethreading(); + else + enforce_multithreading(); + } + } + + /// + /// Forces the library to ignore different thread accessing. + /// + /// https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading

Note that this discards any sessions and graphs that were used in multi-threaded mode.
+ public static void enforce_singlethreading() + { + isSingleThreaded = true; + } + + /// + /// Forces the library to provide a separate and to every different thread accessing. + /// + /// https://github.com/SciSharp/TensorFlow.NET/wiki/Multithreading

Note that this discards any sessions and graphs that were used in single-threaded mode.
+ public static void enforce_multithreading() + { + isSingleThreaded = false; + } + + /// + /// Returns the default session for the current thread. + /// + /// The default `Session` being used in the current thread. + public static Session get_default_session() + { + if (!isSingleThreaded) + return tf.defaultSession; + + if (_singleSesson == null) + { + lock (_threadingLock) + { + if (_singleSesson == null) + _singleSesson = new Session(); + } + } + + return _singleSesson; + } + + /// + /// Returns the default session for the current thread. + /// + /// The default `Session` being used in the current thread. + public static Session set_default_session(Session sess) + { + if (!isSingleThreaded) + return tf.defaultSession = sess; + + lock (_threadingLock) + { + _singleSesson = sess; + } + + return _singleSesson; + } + + /// + /// Returns the default graph for the current thread. + /// + /// The returned graph will be the innermost graph on which a + /// `Graph.as_default()` context has been entered, or a global default + /// graph if none has been explicitly created. + /// + /// NOTE: The default graph is a property of the current thread.If you + /// create a new thread, and wish to use the default graph in that + /// thread, you must explicitly add a `with g.as_default():` in that + /// thread's function. + /// + /// + public static Graph get_default_graph() + { + //return _default_graph_stack.get_default() + return default_graph_stack.get_controller(); + } + + public static Graph set_default_graph(Graph graph) + { + default_graph_stack.set_controller(graph); + return default_graph_stack.get_controller(); + } + + /// + /// Clears the default graph stack and resets the global default graph. + /// + /// NOTE: The default graph is a property of the current thread.This + /// function applies only to the current thread.Calling this function while + /// a `tf.Session` or `tf.InteractiveSession` is active will result in undefined + /// behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects + /// after calling this function will result in undefined behavior. + /// + /// + public static void reset_default_graph() + { + //if (!_default_graph_stack.is_cleared()) + // throw new InvalidOperationException("Do not use tf.reset_default_graph() to clear " + + // "nested graphs. 
If you need a cleared graph, " + + // "exit the nesting and create a new graph."); + default_graph_stack.reset(); + } + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Core/tensorflow.cs b/src/TensorFlowNET.Core/tensorflow.cs index a512663e..a42297b2 100644 --- a/src/TensorFlowNET.Core/tensorflow.cs +++ b/src/TensorFlowNET.Core/tensorflow.cs @@ -21,8 +21,6 @@ namespace Tensorflow { public partial class tensorflow : IObjectLife { - protected internal readonly ThreadLocal _defaultSessionFactory; - public TF_DataType @byte = TF_DataType.TF_UINT8; public TF_DataType @sbyte = TF_DataType.TF_INT8; public TF_DataType int16 = TF_DataType.TF_INT16; @@ -40,10 +38,10 @@ namespace Tensorflow public tensorflow() { - _defaultSessionFactory = new ThreadLocal(() => new Session()); + _constructThreadingObjects(); } - public Session defaultSession => _defaultSessionFactory.Value; + public RefVariable Variable(T data, bool trainable = true, @@ -88,6 +86,11 @@ namespace Tensorflow public string VERSION => c_api.StringPiece(c_api.TF_Version()); + public Session get_default_session() + { + return ops.get_default_session(); + } + public Session Session() { return new Session().as_default(); diff --git a/src/TensorFlowNET.Core/tensorflow.threading.cs b/src/TensorFlowNET.Core/tensorflow.threading.cs new file mode 100644 index 00000000..33e925fd --- /dev/null +++ b/src/TensorFlowNET.Core/tensorflow.threading.cs @@ -0,0 +1,53 @@ +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
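The new `ops.threading.cs` (together with `tensorflow.threading.cs` below) lets the binding either hand every thread its own default graph and session via `ThreadLocal` (the default), or share a single pair across all threads. A small sketch of switching between the two modes; the parallel worker body is illustrative only.

```csharp
using System.Threading.Tasks;
using Tensorflow;
using static Tensorflow.Binding;

// Default behaviour: each thread sees its own default graph and session (ThreadLocal).
Parallel.For(0, 4, i =>
{
    var g = ops.get_default_graph();    // a distinct Graph per worker thread
    // ... build and run per-thread ops here ...
});

// Opt into shared state instead: one graph stack and one session for all threads.
// As the remarks above note, switching discards sessions/graphs from the other mode.
ops.IsSingleThreaded = true;            // same effect as ops.enforce_singlethreading()
var shared = tf.get_default_session();  // now the single shared Session instance
```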
+******************************************************************************/ + +using System.Runtime.CompilerServices; +using System.Threading; + +namespace Tensorflow +{ + public partial class tensorflow : IObjectLife + { + protected ThreadLocal _defaultSessionFactory; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void _constructThreadingObjects() + { + _defaultSessionFactory = new ThreadLocal(() => new Session()); + } + + public Session defaultSession + { + get + { + if (!ops.IsSingleThreaded) + return _defaultSessionFactory.Value; + + return ops.get_default_session(); + } + internal set + { + if (!ops.IsSingleThreaded) + { + _defaultSessionFactory.Value = value; + return; + } + + ops.set_default_session(value); + } + } + } +} \ No newline at end of file diff --git a/src/TensorFlowNET.Hub/DataSetBase.cs b/src/TensorFlowNET.Hub/DataSetBase.cs new file mode 100644 index 00000000..dc47b1c8 --- /dev/null +++ b/src/TensorFlowNET.Hub/DataSetBase.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; +using NumSharp; + +namespace Tensorflow.Hub +{ + public abstract class DataSetBase : IDataSet + { + public NDArray Data { get; protected set; } + public NDArray Labels { get; protected set; } + } +} diff --git a/src/TensorFlowNET.Hub/Datasets.cs b/src/TensorFlowNET.Hub/Datasets.cs new file mode 100644 index 00000000..6c05efb6 --- /dev/null +++ b/src/TensorFlowNET.Hub/Datasets.cs @@ -0,0 +1,46 @@ +using System; +using System.Collections.Generic; +using System.Text; +using NumSharp; + +namespace Tensorflow.Hub +{ + public class Datasets where TDataSet : IDataSet + { + public TDataSet Train { get; private set; } + + public TDataSet Validation { get; private set; } + + public TDataSet Test { get; private set; } + + public Datasets(TDataSet train, TDataSet validation, TDataSet test) + { + Train = train; + Validation = validation; + Test = test; + } + + public (NDArray, NDArray) Randomize(NDArray x, NDArray y) + { + var perm = np.random.permutation(y.shape[0]); + np.random.shuffle(perm); + return (x[perm], y[perm]); + } + + /// + /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) + /// + /// + /// + /// + /// + /// + public (NDArray, NDArray) GetNextBatch(NDArray x, NDArray y, int start, int end) + { + var slice = new Slice(start, end); + var x_batch = x[slice]; + var y_batch = y[slice]; + return (x_batch, y_batch); + } + } +} diff --git a/src/TensorFlowNET.Hub/IDataSet.cs b/src/TensorFlowNET.Hub/IDataSet.cs new file mode 100644 index 00000000..f38a4217 --- /dev/null +++ b/src/TensorFlowNET.Hub/IDataSet.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; +using NumSharp; + +namespace Tensorflow.Hub +{ + public interface IDataSet + { + NDArray Data { get; } + NDArray Labels { get; } + } +} diff --git a/src/TensorFlowNET.Hub/IModelLoader.cs b/src/TensorFlowNET.Hub/IModelLoader.cs new file mode 100644 index 00000000..530138af --- /dev/null +++ b/src/TensorFlowNET.Hub/IModelLoader.cs @@ -0,0 +1,14 @@ +using System; +using System.Threading.Tasks; +using System.Collections.Generic; +using System.Text; +using NumSharp; + +namespace Tensorflow.Hub +{ + public interface IModelLoader + where TDataSet : IDataSet + { + Task> LoadAsync(ModelLoadSetting setting); + } +} diff --git a/src/TensorFlowNET.Hub/MnistDataSet.cs b/src/TensorFlowNET.Hub/MnistDataSet.cs new file mode 100644 index 00000000..4cd9663b --- /dev/null +++ 
b/src/TensorFlowNET.Hub/MnistDataSet.cs @@ -0,0 +1,88 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using NumSharp; +using Tensorflow; + +namespace Tensorflow.Hub +{ + public class MnistDataSet : DataSetBase + { + public int NumOfExamples { get; private set; } + public int EpochsCompleted { get; private set; } + public int IndexInEpoch { get; private set; } + + public MnistDataSet(NDArray images, NDArray labels, Type dataType, bool reshape) + { + EpochsCompleted = 0; + IndexInEpoch = 0; + + NumOfExamples = images.shape[0]; + + images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]); + images = images.astype(dataType); + // for debug np.multiply performance + var sw = new Stopwatch(); + sw.Start(); + images = np.multiply(images, 1.0f / 255.0f); + sw.Stop(); + Console.WriteLine($"{sw.ElapsedMilliseconds}ms"); + Data = images; + + labels = labels.astype(dataType); + Labels = labels; + } + + public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true) + { + if (IndexInEpoch >= NumOfExamples) + IndexInEpoch = 0; + + var start = IndexInEpoch; + // Shuffle for the first epoch + if(EpochsCompleted == 0 && start == 0 && shuffle) + { + var perm0 = np.arange(NumOfExamples); + np.random.shuffle(perm0); + Data = Data[perm0]; + Labels = Labels[perm0]; + } + + // Go to the next epoch + if (start + batch_size > NumOfExamples) + { + // Finished epoch + EpochsCompleted += 1; + + // Get the rest examples in this epoch + var rest_num_examples = NumOfExamples - start; + var images_rest_part = Data[np.arange(start, NumOfExamples)]; + var labels_rest_part = Labels[np.arange(start, NumOfExamples)]; + // Shuffle the data + if (shuffle) + { + var perm = np.arange(NumOfExamples); + np.random.shuffle(perm); + Data = Data[perm]; + Labels = Labels[perm]; + } + + start = 0; + IndexInEpoch = batch_size - rest_num_examples; + var end = IndexInEpoch; + var images_new_part = Data[np.arange(start, end)]; + var labels_new_part = Labels[np.arange(start, end)]; + + return (np.concatenate(new[] { images_rest_part, images_new_part }, axis: 0), + np.concatenate(new[] { labels_rest_part, labels_new_part }, axis: 0)); + } + else + { + IndexInEpoch += batch_size; + var end = IndexInEpoch; + return (Data[np.arange(start, end)], Labels[np.arange(start, end)]); + } + } + } +} diff --git a/src/TensorFlowNET.Hub/MnistModelLoader.cs b/src/TensorFlowNET.Hub/MnistModelLoader.cs new file mode 100644 index 00000000..82096452 --- /dev/null +++ b/src/TensorFlowNET.Hub/MnistModelLoader.cs @@ -0,0 +1,184 @@ +using System; +using System.Threading.Tasks; +using System.Collections.Generic; +using System.Text; +using System.IO; +using NumSharp; + +namespace Tensorflow.Hub +{ + public class MnistModelLoader : IModelLoader + { + private const string DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"; + private const string TRAIN_IMAGES = "train-images-idx3-ubyte.gz"; + private const string TRAIN_LABELS = "train-labels-idx1-ubyte.gz"; + private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; + private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; + + public static async Task> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? 
testSize = null, bool showProgressInConsole = false) + { + var loader = new MnistModelLoader(); + + var setting = new ModelLoadSetting + { + TrainDir = trainDir, + OneHot = oneHot, + ShowProgressInConsole = showProgressInConsole + }; + + if (trainSize.HasValue) + setting.TrainSize = trainSize.Value; + + if (validationSize.HasValue) + setting.ValidationSize = validationSize.Value; + + if (testSize.HasValue) + setting.TestSize = testSize.Value; + + return await loader.LoadAsync(setting); + } + + public async Task> LoadAsync(ModelLoadSetting setting) + { + if (setting.TrainSize.HasValue && setting.ValidationSize >= setting.TrainSize.Value) + throw new ArgumentException("Validation set should be smaller than training set"); + + var sourceUrl = setting.SourceUrl; + + if (string.IsNullOrEmpty(sourceUrl)) + sourceUrl = DEFAULT_SOURCE_URL; + + // load train images + await this.DownloadAsync(sourceUrl + TRAIN_IMAGES, setting.TrainDir, TRAIN_IMAGES, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + await this.UnzipAsync(Path.Combine(setting.TrainDir, TRAIN_IMAGES), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + var trainImages = ExtractImages(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TRAIN_IMAGES)), limit: setting.TrainSize); + + // load train labels + await this.DownloadAsync(sourceUrl + TRAIN_LABELS, setting.TrainDir, TRAIN_LABELS, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + await this.UnzipAsync(Path.Combine(setting.TrainDir, TRAIN_LABELS), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + var trainLabels = ExtractLabels(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TRAIN_LABELS)), one_hot: setting.OneHot, limit: setting.TrainSize); + + // load test images + await this.DownloadAsync(sourceUrl + TEST_IMAGES, setting.TrainDir, TEST_IMAGES, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + await this.UnzipAsync(Path.Combine(setting.TrainDir, TEST_IMAGES), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + var testImages = ExtractImages(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TEST_IMAGES)), limit: setting.TestSize); + + // load test labels + await this.DownloadAsync(sourceUrl + TEST_LABELS, setting.TrainDir, TEST_LABELS, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + await this.UnzipAsync(Path.Combine(setting.TrainDir, TEST_LABELS), setting.TrainDir, showProgressInConsole: setting.ShowProgressInConsole) + .ShowProgressInConsole(setting.ShowProgressInConsole); + + var testLabels = ExtractLabels(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TEST_LABELS)), one_hot: setting.OneHot, limit: setting.TestSize); + + var end = trainImages.shape[0]; + + var validationSize = setting.ValidationSize; + + var validationImages = trainImages[np.arange(validationSize)]; + var validationLabels = trainLabels[np.arange(validationSize)]; + + trainImages = trainImages[np.arange(validationSize, end)]; + trainLabels = trainLabels[np.arange(validationSize, end)]; + + var dtype = setting.DataType; + var reshape = setting.ReShape; + + 
var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape); + var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape); + var test = new MnistDataSet(testImages, testLabels, dtype, reshape); + + return new Datasets(train, validation, test); + } + + private NDArray ExtractImages(string file, int? limit = null) + { + if (!Path.IsPathRooted(file)) + file = Path.Combine(AppContext.BaseDirectory, file); + + using (var bytestream = new FileStream(file, FileMode.Open)) + { + var magic = Read32(bytestream); + if (magic != 2051) + throw new Exception($"Invalid magic number {magic} in MNIST image file: {file}"); + + var num_images = Read32(bytestream); + num_images = limit == null ? num_images : Math.Min(num_images, (int)limit); + + var rows = Read32(bytestream); + var cols = Read32(bytestream); + + var buf = new byte[rows * cols * num_images]; + + bytestream.Read(buf, 0, buf.Length); + + var data = np.frombuffer(buf, np.@byte); + data = data.reshape(num_images, rows, cols, 1); + + return data; + } + } + + private NDArray ExtractLabels(string file, bool one_hot = false, int num_classes = 10, int? limit = null) + { + if (!Path.IsPathRooted(file)) + file = Path.Combine(AppContext.BaseDirectory, file); + + using (var bytestream = new FileStream(file, FileMode.Open)) + { + var magic = Read32(bytestream); + if (magic != 2049) + throw new Exception($"Invalid magic number {magic} in MNIST label file: {file}"); + + var num_items = Read32(bytestream); + num_items = limit == null ? num_items : Math.Min(num_items, (int)limit); + + var buf = new byte[num_items]; + + bytestream.Read(buf, 0, buf.Length); + + var labels = np.frombuffer(buf, np.uint8); + + if (one_hot) + return DenseToOneHot(labels, num_classes); + + return labels; + } + } + + private NDArray DenseToOneHot(NDArray labels_dense, int num_classes) + { + var num_labels = labels_dense.shape[0]; + var index_offset = np.arange(num_labels) * num_classes; + var labels_one_hot = np.zeros(num_labels, num_classes); + var labels = labels_dense.Data(); + for (int row = 0; row < num_labels; row++) + { + var col = labels[row]; + labels_one_hot.SetData(1.0, row, col); + } + + return labels_one_hot; + } + + private int Read32(FileStream bytestream) + { + var buffer = new byte[sizeof(uint)]; + var count = bytestream.Read(buffer, 0, 4); + return np.frombuffer(buffer, ">u4").Data()[0]; + } + } +} diff --git a/src/TensorFlowNET.Hub/ModelLoadSetting.cs b/src/TensorFlowNET.Hub/ModelLoadSetting.cs new file mode 100644 index 00000000..89e46748 --- /dev/null +++ b/src/TensorFlowNET.Hub/ModelLoadSetting.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text; +using NumSharp; + +namespace Tensorflow.Hub +{ + public class ModelLoadSetting + { + public string TrainDir { get; set; } + public bool OneHot { get; set; } + public Type DataType { get; set; } = typeof(float); + public bool ReShape { get; set; } + public int ValidationSize { get; set; } = 5000; + public int? TrainSize { get; set; } + public int? TestSize { get; set; } + public string SourceUrl { get; set; } + public bool ShowProgressInConsole { get; set; } + } +} diff --git a/src/TensorFlowNET.Hub/README.md b/src/TensorFlowNET.Hub/README.md new file mode 100644 index 00000000..156b263d --- /dev/null +++ b/src/TensorFlowNET.Hub/README.md @@ -0,0 +1,5 @@ +## TensorFlow Hub + +TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models. 
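Taken together, the Hub pieces above form a self-contained MNIST pipeline: download, unzip, decode, train/validation/test split, and mini-batching. A usage sketch follows; the directory, batch size and the async calling context are illustrative assumptions.

```csharp
using System;
using Tensorflow.Hub;

// Inside an async method: downloads and unzips the four MNIST files into ./mnist
// on first run, then decodes the images and labels.
var datasets = await MnistModelLoader.LoadAsync(
    trainDir: "./mnist",
    oneHot: true,                        // labels expanded via DenseToOneHot
    showProgressInConsole: true);

Console.WriteLine($"train: {datasets.Train.NumOfExamples}, " +
                  $"validation: {datasets.Validation.NumOfExamples}, " +
                  $"test: {datasets.Test.NumOfExamples}");

// Mini-batches for SGD; GetNextBatch reshuffles and wraps around at epoch boundaries.
var (x_batch, y_batch) = datasets.Train.GetNextBatch(batch_size: 64);
```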
In particular, it provides **modules**, which are pre-trained pieces of TensorFlow models that can be reused on new tasks. + +https://github.com/tensorflow/hub \ No newline at end of file diff --git a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj new file mode 100644 index 00000000..640e1515 --- /dev/null +++ b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj @@ -0,0 +1,27 @@ + + + Tensorflow.Hub + netstandard2.0 + 0.0.6 + Kerry Jiang, Haiping Chen + SciSharp STACK + Apache 2.0 + https://github.com/SciSharp/TensorFlow.NET + git + http://scisharpstack.org + TensorFlow, SciSharp, MachineLearning + TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models. + SciSharp.TensorFlowHub + true + Fix GetNextBatch() bug. +Change to NumSharp compact version. + https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 + TensorFlow.Hub + + + DEBUG;TRACE + + + + + \ No newline at end of file diff --git a/src/TensorFlowNET.Hub/Utils.cs b/src/TensorFlowNET.Hub/Utils.cs new file mode 100644 index 00000000..5b06aaad --- /dev/null +++ b/src/TensorFlowNET.Hub/Utils.cs @@ -0,0 +1,137 @@ +using System; +using System.IO; +using System.IO.Compression; +using System.Collections.Generic; +using System.Net; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace Tensorflow.Hub +{ + public static class Utils + { + public static async Task DownloadAsync(this IModelLoader modelLoader, string url, string saveTo) + where TDataSet : IDataSet + { + var dir = Path.GetDirectoryName(saveTo); + var fileName = Path.GetFileName(saveTo); + await modelLoader.DownloadAsync(url, dir, fileName); + } + + public static async Task DownloadAsync(this IModelLoader modelLoader, string url, string dirSaveTo, string fileName, bool showProgressInConsole = false) + where TDataSet : IDataSet + { + if (!Path.IsPathRooted(dirSaveTo)) + dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo); + + var fileSaveTo = Path.Combine(dirSaveTo, fileName); + + if (showProgressInConsole) + { + Console.WriteLine($"Downloading {fileName}"); + } + + if (File.Exists(fileSaveTo)) + { + if (showProgressInConsole) + { + Console.WriteLine($"The file {fileName} already exists"); + } + + return; + } + + Directory.CreateDirectory(dirSaveTo); + + using (var wc = new WebClient()) + { + await wc.DownloadFileTaskAsync(url, fileSaveTo).ConfigureAwait(false); + } + + } + + public static async Task UnzipAsync(this IModelLoader modelLoader, string zipFile, string saveTo, bool showProgressInConsole = false) + where TDataSet : IDataSet + { + if (!Path.IsPathRooted(saveTo)) + saveTo = Path.Combine(AppContext.BaseDirectory, saveTo); + + Directory.CreateDirectory(saveTo); + + if (!Path.IsPathRooted(zipFile)) + zipFile = Path.Combine(AppContext.BaseDirectory, zipFile); + + var destFileName = Path.GetFileNameWithoutExtension(zipFile); + var destFilePath = Path.Combine(saveTo, destFileName); + + if (showProgressInConsole) + Console.WriteLine($"Unzippinng {Path.GetFileName(zipFile)}"); + + if (File.Exists(destFilePath)) + { + if (showProgressInConsole) + Console.WriteLine($"The file {destFileName} already exists"); + } + + using (GZipStream unzipStream = new GZipStream(File.OpenRead(zipFile), CompressionMode.Decompress)) + { + using (var destStream = File.Create(destFilePath)) + { + await unzipStream.CopyToAsync(destStream).ConfigureAwait(false); + await destStream.FlushAsync().ConfigureAwait(false); + destStream.Close(); + } + + 
unzipStream.Close(); + } + } + + public static async Task ShowProgressInConsole(this Task task, bool enable) + { + if (!enable) + { + await task; + return; + } + + var cts = new CancellationTokenSource(); + + var showProgressTask = ShowProgressInConsole(cts); + + try + { + await task; + } + finally + { + cts.Cancel(); + } + + await showProgressTask; + Console.WriteLine("Done."); + } + + private static async Task ShowProgressInConsole(CancellationTokenSource cts) + { + var cols = 0; + + await Task.Delay(100); + + while (!cts.IsCancellationRequested) + { + await Task.Delay(100); + Console.Write("."); + cols++; + + if (cols % 50 == 0) + { + Console.WriteLine(); + } + } + + if (cols > 0) + Console.WriteLine(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Activations.cs b/src/TensorFlowNET.Keras/Activations.cs new file mode 100644 index 00000000..5213fcb9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Activations.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + class Activations + { + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Densenet.cs b/src/TensorFlowNET.Keras/Applications/Densenet.cs new file mode 100644 index 00000000..a4cacc4a --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Densenet.cs @@ -0,0 +1,35 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Densenet + { + public static Tensor dense_block(Tensor x, int blocks, string name) => throw new NotImplementedException(); + + public static Tensor transition_block(Tensor x, float reduction, string name) => throw new NotImplementedException(); + + public static Tensor conv_block(Tensor x, float growth_rate, string name) => throw new NotImplementedException(); + + public static Model DenseNet(int blocks, bool include_top=true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model DenseNet121(int blocks, bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model DenseNet169(int blocks, bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model DenseNet201(int blocks, bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Efficientnet.cs b/src/TensorFlowNET.Keras/Applications/Efficientnet.cs new file mode 100644 index 00000000..4b59bcee --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Efficientnet.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class BlockArg + { + + } + + public class Efficientnet + { + public static Model EfficientNet(float width_coefficient, float 
depth_coefficient, int default_size, float dropout_rate = 0.2f, + float drop_connect_rate = 0.2f, int depth_divisor = 8, string activation = "swish", + BlockArg[] blocks_args = null, string model_name = "efficientnet", bool include_top = true, + string weights = "imagenet", Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor block(Tensor inputs, string activation= "swish", float drop_rate= 0f,string name= "", + int filters_in= 32, int filters_out= 16, int kernel_size= 3, int strides= 1, + int expand_ratio= 1, float se_ratio= 0, bool id_skip= true) => throw new NotImplementedException(); + + public static Model EfficientNetB0(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB1(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB2(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB3(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB4(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB5(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB6(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model EfficientNetB7(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/ImagenetUtils.cs b/src/TensorFlowNET.Keras/Applications/ImagenetUtils.cs new file mode 100644 index 00000000..5e5df051 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/ImagenetUtils.cs @@ -0,0 +1,22 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class ImagenetUtils + { + public static Tensor preprocess_input(Tensor x, string data_format= null, string mode= "caffe") => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top= 5) => throw new NotImplementedException(); + + public static Tensor 
_preprocess_numpy_input(Tensor x, string data_format, string mode) => throw new NotImplementedException(); + + public static Tensor _preprocess_symbolic_input(Tensor x, string data_format, string mode) => throw new NotImplementedException(); + + public static TensorShape obtain_input_shape(TensorShape input_shape, int default_size, int min_size, + string data_format, bool require_flatten, string weights= null) => throw new NotImplementedException(); + + public static ((int, int), (int, int)) correct_pad(Tensor inputs, (int, int) kernel_size) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/InceptionResnetV2.cs b/src/TensorFlowNET.Keras/Applications/InceptionResnetV2.cs new file mode 100644 index 00000000..bfc27f53 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/InceptionResnetV2.cs @@ -0,0 +1,22 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class InceptionResnetV2 + { + public static Model InceptionResNetV2(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor conv2d_bn(Tensor x, int filters, (int, int) kernel_size, (int, int) strides, string padding= "same", + string activation= "relu", bool use_bias= false, string name= null) => throw new NotImplementedException(); + + public static Tensor inception_resnet_block(Tensor x, float scale, string block_type, int block_idx, string activation= "relu") => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/InceptionV3.cs b/src/TensorFlowNET.Keras/Applications/InceptionV3.cs new file mode 100644 index 00000000..9b339e18 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/InceptionV3.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class InceptionV3 + { + public static Model Inceptionv3(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor conv2d_bn(Tensor x, int filters, int num_row, int num_col, string padding = "same", (int, int)? 
strides = null, string name = null) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Mobilenet.cs b/src/TensorFlowNET.Keras/Applications/Mobilenet.cs new file mode 100644 index 00000000..65eb5db6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Mobilenet.cs @@ -0,0 +1,18 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Mobilenet + { + public static Model MobileNet(TensorShape input_shape= null, float alpha= 1.0f, int depth_multiplier= 1, float dropout= 1e-3f, + bool include_top= true, string weights= "imagenet", Tensor input_tensor= null, string pooling= null, int classes= 1000) => throw new NotImplementedException(); + + public static Tensor conv2d_bn(Tensor x, int filters, float alpha, (int, int)? kernel = null, (int, int)? strides = null) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/MobilenetV2.cs b/src/TensorFlowNET.Keras/Applications/MobilenetV2.cs new file mode 100644 index 00000000..a30c6c2a --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/MobilenetV2.cs @@ -0,0 +1,21 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class MobilenetV2 + { + public static Model MobileNetV2(TensorShape input_shape = null, float alpha = 1.0f, bool include_top = true, + string weights = "imagenet", Tensor input_tensor = null, string pooling = null, + int classes = 1000) => throw new NotImplementedException(); + + public static Tensor _inverted_res_block(Tensor inputs, int expansion, (int, int) stride, float alpha, int filters, string block_id) => throw new NotImplementedException(); + + public static Tensor _make_divisible(Tensor v, Tensor divisor, Tensor min_value= null) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Nasnet.cs b/src/TensorFlowNET.Keras/Applications/Nasnet.cs new file mode 100644 index 00000000..9de5d3d9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Nasnet.cs @@ -0,0 +1,31 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Nasnet + { + public static Model NASNet(TensorShape input_shape = null, int penultimate_filters = 4032, int num_blocks = 6, int stem_block_filters = 96, + bool skip_reduction = true, int filter_multiplier = 2, bool include_top = true, string weights = null, + Tensor input_tensor = null, string pooling = null, int classes = 1000, int? 
default_size = null) => throw new NotImplementedException(); + + public static Model NASNetMobile(TensorShape input_shape = null, bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model NASNetLarge(TensorShape input_shape = null, bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor _separable_conv_block(Tensor ip, int filters, (int, int)? kernel_size= null, (int, int)? strides= null, string block_id= null) => throw new NotImplementedException(); + + public static Tensor _adjust_block(Tensor p, Tensor ip, int filters, string block_id= null) => throw new NotImplementedException(); + + public static Tensor _normal_a_cell(Tensor p, Tensor ip, int filters, string block_id = null) => throw new NotImplementedException(); + + public static Tensor _reduction_a_cell(Tensor p, Tensor ip, int filters, string block_id = null) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Resnet.cs b/src/TensorFlowNET.Keras/Applications/Resnet.cs new file mode 100644 index 00000000..8154f404 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Resnet.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Resnet + { + public static Model ResNet(Func stack_fn, bool preact, bool use_bias, string model_name= "resnet", bool include_top= true, + string weights= "imagenet", Tensor input_tensor= null, TensorShape input_shape= null, string pooling= null, + int classes= 1000) => throw new NotImplementedException(); + + public static Tensor block1(Tensor x, int filters, int kernel_size= 3, int stride= 1, bool conv_shortcut= true, string name= null) => throw new NotImplementedException(); + + public static Tensor stack1(Tensor x, int filters, int blocks, int stride1 = 2, string name = null) => throw new NotImplementedException(); + + public static Tensor block2(Tensor x, int filters, int kernel_size = 3, int stride = 1, bool conv_shortcut = true, string name = null) => throw new NotImplementedException(); + + public static Tensor stack2(Tensor x, int filters, int blocks, int stride1 = 2, string name = null) => throw new NotImplementedException(); + + public static Tensor block3(Tensor x, int filters, int kernel_size = 3, int stride = 1, int groups = 32, bool conv_shortcut = true, string name = null) => throw new NotImplementedException(); + + public static Tensor stack3(Tensor x, int filters, int blocks, int stride1 = 2, int groups = 32, string name = null) => throw new NotImplementedException(); + + public static Model ResNet50(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model ResNet101(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model 
ResNet152(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/ResnetV2.cs b/src/TensorFlowNET.Keras/Applications/ResnetV2.cs new file mode 100644 index 00000000..edb9df55 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/ResnetV2.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class ResnetV2 + { + public static Model ResNet50V2(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model ResNet101V2(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Model ResNet152V2(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Vgg16.cs b/src/TensorFlowNET.Keras/Applications/Vgg16.cs new file mode 100644 index 00000000..8dcc1ce2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Vgg16.cs @@ -0,0 +1,17 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Vgg16 + { + public static Model VGG16(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Vgg19.cs b/src/TensorFlowNET.Keras/Applications/Vgg19.cs new file mode 100644 index 00000000..86e2969b --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Vgg19.cs @@ -0,0 +1,17 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Vgg19 + { + public static Model VGG19(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Applications/Xception.cs 
b/src/TensorFlowNET.Keras/Applications/Xception.cs new file mode 100644 index 00000000..fe400cfb --- /dev/null +++ b/src/TensorFlowNET.Keras/Applications/Xception.cs @@ -0,0 +1,17 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Applications +{ + public class Xception + { + public static Model XCeption(bool include_top = true, string weights = "imagenet", + Tensor input_tensor = null, TensorShape input_shape = null, + string pooling = null, int classes = 1000) => throw new NotImplementedException(); + + public static Tensor preprocess_input(Tensor x, string data_format = null) => throw new NotImplementedException(); + + public static Tensor decode_predictions(Tensor preds, int top = 5) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Args.cs b/src/TensorFlowNET.Keras/Args.cs new file mode 100644 index 00000000..f2d9d27b --- /dev/null +++ b/src/TensorFlowNET.Keras/Args.cs @@ -0,0 +1,29 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Args + { + private List args = new List(); + + public object this[int index] + { + get + { + return args.Count < index ? args[index] : null; + } + } + + public T Get(int index) + { + return args.Count < index ? (T)args[index] : default(T); + } + + public void Add(T arg) + { + args.Add(arg); + } + } +} diff --git a/src/TensorFlowNET.Keras/Backend.cs b/src/TensorFlowNET.Keras/Backend.cs new file mode 100644 index 00000000..4612d7ee --- /dev/null +++ b/src/TensorFlowNET.Keras/Backend.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + class Backend + { + } +} diff --git a/src/TensorFlowNET.Keras/BackendConfig.cs b/src/TensorFlowNET.Keras/BackendConfig.cs new file mode 100644 index 00000000..f8321bc3 --- /dev/null +++ b/src/TensorFlowNET.Keras/BackendConfig.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + class BackendConfig + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/BaseLogger.cs b/src/TensorFlowNET.Keras/Callbacks/BaseLogger.cs new file mode 100644 index 00000000..1f2ece8f --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/BaseLogger.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class BaseLogger + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/CSVLogger.cs b/src/TensorFlowNET.Keras/Callbacks/CSVLogger.cs new file mode 100644 index 00000000..698bfb53 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/CSVLogger.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class CSVLogger + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/Callback.cs b/src/TensorFlowNET.Keras/Callbacks/Callback.cs new file mode 100644 index 00000000..ce5b839c --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/Callback.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class Callback + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs b/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs new file mode 100644 index 00000000..e0782fea --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/CallbackList.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using 
System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class CallbackList + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/EarlyStopping.cs b/src/TensorFlowNET.Keras/Callbacks/EarlyStopping.cs new file mode 100644 index 00000000..19c320ce --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/EarlyStopping.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class EarlyStopping + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/History.cs b/src/TensorFlowNET.Keras/Callbacks/History.cs new file mode 100644 index 00000000..3e6e4bbb --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/History.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class History + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/LambdaCallback.cs b/src/TensorFlowNET.Keras/Callbacks/LambdaCallback.cs new file mode 100644 index 00000000..67203f40 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/LambdaCallback.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class LambdaCallback + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/LearningRateScheduler.cs b/src/TensorFlowNET.Keras/Callbacks/LearningRateScheduler.cs new file mode 100644 index 00000000..539c97d9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/LearningRateScheduler.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class LearningRateScheduler + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/ModelCheckpoint.cs b/src/TensorFlowNET.Keras/Callbacks/ModelCheckpoint.cs new file mode 100644 index 00000000..72eca36c --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/ModelCheckpoint.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class ModelCheckpoint + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs b/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs new file mode 100644 index 00000000..bf875a35 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/ProgbarLogger.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class ProgbarLogger + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/ReduceLROnPlateau.cs b/src/TensorFlowNET.Keras/Callbacks/ReduceLROnPlateau.cs new file mode 100644 index 00000000..41e63aa3 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/ReduceLROnPlateau.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class ReduceLROnPlateau + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/RemoteMonitor.cs b/src/TensorFlowNET.Keras/Callbacks/RemoteMonitor.cs new file mode 100644 index 00000000..59d9f67c --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/RemoteMonitor.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class RemoteMonitor + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/TensorBoard.cs b/src/TensorFlowNET.Keras/Callbacks/TensorBoard.cs new file mode 100644 index 00000000..ab9d62ee --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/TensorBoard.cs @@ -0,0 +1,10 @@ +using 
System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class TensorBoard + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/TensorBoardV1.cs b/src/TensorFlowNET.Keras/Callbacks/TensorBoardV1.cs new file mode 100644 index 00000000..6db82123 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/TensorBoardV1.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class TensorBoardV1 + { + } +} diff --git a/src/TensorFlowNET.Keras/Callbacks/TerminateOnNaN.cs b/src/TensorFlowNET.Keras/Callbacks/TerminateOnNaN.cs new file mode 100644 index 00000000..f26a8717 --- /dev/null +++ b/src/TensorFlowNET.Keras/Callbacks/TerminateOnNaN.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Callbacks +{ + class TerminateOnNaN + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/ConstraintBase.cs b/src/TensorFlowNET.Keras/Constraints/ConstraintBase.cs new file mode 100644 index 00000000..dd100cef --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/ConstraintBase.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + public abstract class ConstraintBase + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/MaxNorm.cs b/src/TensorFlowNET.Keras/Constraints/MaxNorm.cs new file mode 100644 index 00000000..15c7b439 --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/MaxNorm.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + class MaxNorm + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/MinMaxNorm.cs b/src/TensorFlowNET.Keras/Constraints/MinMaxNorm.cs new file mode 100644 index 00000000..f4636553 --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/MinMaxNorm.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + class MinMaxNorm + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/NonNeg.cs b/src/TensorFlowNET.Keras/Constraints/NonNeg.cs new file mode 100644 index 00000000..b1a5e82e --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/NonNeg.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + class NonNeg + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/RadialConstraint.cs b/src/TensorFlowNET.Keras/Constraints/RadialConstraint.cs new file mode 100644 index 00000000..3080bb7e --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/RadialConstraint.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + class RadialConstraint + { + } +} diff --git a/src/TensorFlowNET.Keras/Constraints/UnitNorm.cs b/src/TensorFlowNET.Keras/Constraints/UnitNorm.cs new file mode 100644 index 00000000..0a0a5a6b --- /dev/null +++ b/src/TensorFlowNET.Keras/Constraints/UnitNorm.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Constraints +{ + class UnitNorm + { + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/BostonHousing.cs b/src/TensorFlowNET.Keras/Datasets/BostonHousing.cs new file mode 100644 index 00000000..261d892f --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/BostonHousing.cs @@ -0,0 
+1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class BostonHousing + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data(string path = "boston_housing.npz", float test_split = 0.2f, int seed = 113) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/Cifar.cs b/src/TensorFlowNET.Keras/Datasets/Cifar.cs new file mode 100644 index 00000000..6bf1687f --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/Cifar.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class Cifar + { + public (Tensor, Tensor) load_batch(string fpath, string label_key = "labels") => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/Cifar10.cs b/src/TensorFlowNET.Keras/Datasets/Cifar10.cs new file mode 100644 index 00000000..2dccf547 --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/Cifar10.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class Cifar10 + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data() => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/Cifar100.cs b/src/TensorFlowNET.Keras/Datasets/Cifar100.cs new file mode 100644 index 00000000..d4adca8d --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/Cifar100.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class Cifar100 + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data(string label_mode = "fine") => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/FashionMNIST.cs b/src/TensorFlowNET.Keras/Datasets/FashionMNIST.cs new file mode 100644 index 00000000..36db09c8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/FashionMNIST.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class FashionMNIST + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data() => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/IMDB.cs b/src/TensorFlowNET.Keras/Datasets/IMDB.cs new file mode 100644 index 00000000..c115bc69 --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/IMDB.cs @@ -0,0 +1,15 @@ +using Newtonsoft.Json.Linq; +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class IMDB + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data(string path= "imdb.npz", int? num_words= null, int skip_top= 0, int? 
maxlen= null, + int seed= 113,int start_char= 1, int oov_char= 2, int index_from= 3) => throw new NotImplementedException(); + + public static JObject get_word_index(string path= "imdb_word_index.json") => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/MNIST.cs b/src/TensorFlowNET.Keras/Datasets/MNIST.cs new file mode 100644 index 00000000..558c959a --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/MNIST.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class MNIST + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data(string path = "mnist.npz") => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Datasets/Reuters.cs b/src/TensorFlowNET.Keras/Datasets/Reuters.cs new file mode 100644 index 00000000..6a704e75 --- /dev/null +++ b/src/TensorFlowNET.Keras/Datasets/Reuters.cs @@ -0,0 +1,12 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Datasets +{ + public class Reuters + { + public static ((Tensor, Tensor), (Tensor, Tensor)) load_data(string path = "reuters.npz", int? num_words= null, int skip_top= 0, + int? maxlen= null,float test_split= 0.2f, int seed= 113,int start_char= 1,int oov_char= 2,int index_from= 3) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/DistributedTrainingUtils.cs b/src/TensorFlowNET.Keras/Distribute/DistributedTrainingUtils.cs new file mode 100644 index 00000000..b78931a2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/DistributedTrainingUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class DistributedTrainingUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasCorrectnessTestBase.cs b/src/TensorFlowNET.Keras/Distribute/KerasCorrectnessTestBase.cs new file mode 100644 index 00000000..668d6c0e --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasCorrectnessTestBase.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasCorrectnessTestBase + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasDnnCorrectnessTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasDnnCorrectnessTest.cs new file mode 100644 index 00000000..c7b69c90 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasDnnCorrectnessTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasDnnCorrectnessTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasEmbeddingModelCorrectnessTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasEmbeddingModelCorrectnessTest.cs new file mode 100644 index 00000000..46a4838b --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasEmbeddingModelCorrectnessTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasEmbeddingModelCorrectnessTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasImageModelCorrectnessTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasImageModelCorrectnessTest.cs new file mode 100644 index 00000000..4bb131d4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasImageModelCorrectnessTest.cs @@ -0,0 +1,10 @@ +using System; +using 
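The dataset classes added above (BostonHousing, Cifar10, Cifar100, FashionMNIST, IMDB, MNIST, Reuters) all expose a static `load_data` returning a nested `((x_train, y_train), (x_test, y_test))` tuple; in this commit the bodies only throw `NotImplementedException`. A minimal sketch of the intended call pattern once the stubs are filled in (the argument values here are placeholders, not part of the patch):

```csharp
using System;
using Tensorflow.Keras.Datasets;

class DatasetLoadingSketch
{
    static void Main()
    {
        // Each loader returns ((x_train, y_train), (x_test, y_test)) as nested tuples,
        // so the result can be deconstructed directly. The bodies still throw
        // NotImplementedException in this commit; this only shows the call shape.
        var ((x_train, y_train), (x_test, y_test)) = MNIST.load_data();

        // IMDB adds vocabulary controls on top of the same pattern; all arguments are optional.
        var ((imdb_x, imdb_y), (imdb_test_x, imdb_test_y)) =
            IMDB.load_data(num_words: 10000, skip_top: 20);

        Console.WriteLine($"{x_train} / {y_train}, {x_test} / {y_test}");
    }
}
```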
System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasImageModelCorrectnessTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasOptimizerV2Test.cs b/src/TensorFlowNET.Keras/Distribute/KerasOptimizerV2Test.cs new file mode 100644 index 00000000..32b20b05 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasOptimizerV2Test.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasOptimizerV2Test + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasPremadeModelsTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasPremadeModelsTest.cs new file mode 100644 index 00000000..78208afd --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasPremadeModelsTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasPremadeModelsTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasRnnModelCorrectnessTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasRnnModelCorrectnessTest.cs new file mode 100644 index 00000000..7e4ed8c1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasRnnModelCorrectnessTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasRnnModelCorrectnessTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasStatefulLstmModelCorrectnessTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasStatefulLstmModelCorrectnessTest.cs new file mode 100644 index 00000000..eea644bb --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasStatefulLstmModelCorrectnessTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasStatefulLstmModelCorrectnessTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/KerasUtilsTest.cs b/src/TensorFlowNET.Keras/Distribute/KerasUtilsTest.cs new file mode 100644 index 00000000..c9a188c0 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/KerasUtilsTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class KerasUtilsTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTF1Test.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTF1Test.cs new file mode 100644 index 00000000..7fcadbc7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTF1Test.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerCallbackTF1Test + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTf2Test.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTf2Test.cs new file mode 100644 index 00000000..2b52a942 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerCallbackTf2Test.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerCallbackTf2Test + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerFaultToleranceTest.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerFaultToleranceTest.cs new file mode 100644 index 00000000..b1d3f98a --- /dev/null +++ 
b/src/TensorFlowNET.Keras/Distribute/MultiWorkerFaultToleranceTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerFaultToleranceTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerTest.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTest.cs new file mode 100644 index 00000000..bbd1a450 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerTestingUtils.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTestingUtils.cs new file mode 100644 index 00000000..74928744 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTestingUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerTestingUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingState.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingState.cs new file mode 100644 index 00000000..e3322e80 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingState.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerTrainingState + { + } +} diff --git a/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingStateTest.cs b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingStateTest.cs new file mode 100644 index 00000000..78fcb1f6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Distribute/MultiWorkerTrainingStateTest.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Distribute +{ + class MultiWorkerTrainingStateTest + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/BaseLayer.cs b/src/TensorFlowNET.Keras/Engine/BaseLayer.cs new file mode 100644 index 00000000..36c69843 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/BaseLayer.cs @@ -0,0 +1,73 @@ +using Keras.Layers; +using NumSharp; +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class TensorFlowOpLayer : Layer + { + public TensorFlowOpLayer(string node_def, string name, NDArray[] constants = null, bool trainable = true, string dtype = null) + { + + } + + public override void call(Tensor[] inputs) + { + throw new NotImplementedException(); + } + + public override Dictionary get_config() + { + throw new NotImplementedException(); + } + + private NodeDef _make_node_def(Graph graph) => throw new NotImplementedException(); + + private Tensor[] _make_op(Tensor[] inputs) => throw new NotImplementedException(); + + private Tensor[] _defun_call(Tensor[] inputs) => throw new NotImplementedException(); + } + + public class AddLoss : Layer + { + public AddLoss(bool unconditional) + { + throw new NotImplementedException(); + } + + public override void call(Tensor[] inputs) + { + throw new NotImplementedException(); + } + + public override Dictionary get_config() + { + throw new NotImplementedException(); + } + } + + public class AddMetric : Layer + { + public AddMetric(string aggregation = null, string metric_name = null) + { + throw new NotImplementedException(); + } + + public override void call(Tensor[] inputs) + { + throw new 
NotImplementedException(); + } + + public override Dictionary get_config() + { + throw new NotImplementedException(); + } + } + + public class KerasHistory + { + + } +} diff --git a/src/TensorFlowNET.Keras/Engine/BaseLayerUtils.cs b/src/TensorFlowNET.Keras/Engine/BaseLayerUtils.cs new file mode 100644 index 00000000..323e9819 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/BaseLayerUtils.cs @@ -0,0 +1,45 @@ +using Keras.Layers; +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Keras.Initializers; +using Tensorflow.Keras.Metrics; + +namespace Tensorflow.Keras.Engine +{ + public class BaseLayerUtils + { + public static (Metric, Metric) create_mean_metric(Tensor value, string name = null) => throw new NotImplementedException(); + + public static VariableV1 make_variable(string name, TensorShape shape= null, TF_DataType dtype= TF_DataType.TF_FLOAT, Initializer initializer= null, + bool trainable= true, string caching_device= null, bool validate_shape= true, Constraints.ConstraintBase constraint= null, + bool use_resource= false, Graph[] collections= null, VariableSynchronization synchronization= VariableSynchronization.Auto, + VariableAggregation aggregation= VariableAggregation.None) => throw new NotImplementedException(); + + public static Tensor[] collect_previous_mask(TensorArray input_tensors) => throw new NotImplementedException(); + + public bool have_all_keras_metadata(Tensor[] tensors) => throw new NotImplementedException(); + + public static dynamic generate_placeholders_from_shape(TensorShape shape) => throw new NotImplementedException(); + + public Layer[] create_keras_history(Tensor[] tensors) => throw new NotImplementedException(); + + private void _create_keras_history_helper(Tensor[] tensors, TensorFlowOpLayer[] processed_ops, Layer[] created_layers) => throw new NotImplementedException(); + + public Tensor[] unnest_if_single_tensor(Tensor[] input_tensors) => throw new NotImplementedException(); + + public bool needs_keras_history(Tensor[] tensors, bool ignore_call_context= false) => throw new NotImplementedException(); + + public bool is_in_keras_graph() => throw new NotImplementedException(); + + public string is_in_eager_or_tf_function() => throw new NotImplementedException(); + + public bool is_in_tf_function() => throw new NotImplementedException(); + + public bool uses_keras_history(Tensor[] tensors) => throw new NotImplementedException(); + + public Tensor[] mark_checked(Tensor[] tensors) => throw new NotImplementedException(); + + public CallContext call_context() => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayer.cs b/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayer.cs new file mode 100644 index 00000000..61c57d39 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayer.cs @@ -0,0 +1,58 @@ +using Keras.Layers; +using NumSharp; +using System; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Data; +using Tensorflow.Keras.Initializers; + +namespace Tensorflow.Keras.Engine +{ + public abstract class PreprocessingLayer : Layer + { + public abstract void adapt(Data.DatasetV1 data, bool reset_state = true); + } + + public abstract class Combiner + { + public abstract dynamic compute(NDArray[] batch_values, dynamic accumulator = null); + + public abstract dynamic merge(dynamic[] accumulators); + + public abstract NDArray[] extract(dynamic accumulator); + + public abstract dynamic restore(Tensor output); + + public abstract string 
serialize(dynamic accumulator); + + public abstract dynamic deserialize(string encoded_accumulator); + + public override string ToString() + { + throw new NotImplementedException(); + } + } + + public class CombinerPreprocessingLayer : PreprocessingLayer + { + public CombinerPreprocessingLayer(Combiner combiner) + { + throw new NotImplementedException(); + } + + private void _add_state_variable(string name, TensorShape shape, string dtype, Initializer initializer= null, string partitioner= null, bool? use_resource= null) => throw new NotImplementedException(); + + private Dictionary _restore_updates() => throw new NotImplementedException(); + + private bool _dataset_is_infinite(DatasetV1 dataset) => throw new NotImplementedException(); + + private dynamic _get_dataset_iterator(DatasetV1 dataset) => throw new NotImplementedException(); + + private void _set_state_variables(Dictionary updates) => throw new NotImplementedException(); + + public override void adapt(DatasetV1 data, bool reset_state = true) + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayerV1.cs b/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayerV1.cs new file mode 100644 index 00000000..b2c7d153 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/BasePreprocessingLayerV1.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class BasePreprocessingLayerV1 + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/CallContext.cs b/src/TensorFlowNET.Keras/Engine/CallContext.cs new file mode 100644 index 00000000..8cc38df7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/CallContext.cs @@ -0,0 +1,45 @@ +using Keras.Layers; +using System; +using System.Collections.Generic; +using System.Reflection; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class CallContext + { + public bool in_keras_graph + { + get + { + throw new NotImplementedException(); + } + } + public CallContext() + { + + } + + public void enter(Layer layer, Tensor[] inputs, Graph build_graph, bool training) => throw new NotImplementedException(); + + public bool training_arg_passed_to_call(string[] argspec, Dictionary args, Dictionary kwargs) => throw new NotImplementedException(); + + public dynamic autocast_context_manager(string dtype) => throw new NotImplementedException(); + + public bool is_subclassed(Layer layer) => throw new NotImplementedException(); + + public bool from_saved_model(Layer layer) => throw new NotImplementedException(); + + public bool check_graph_consistency(Tensor tensor = null, string method = "add_loss", bool force_raise = false) => throw new NotImplementedException(); + + public dynamic mark_as_return(Tensor[] outputs, dynamic acd) => throw new NotImplementedException(); + + public MethodInfo Default(MemberInfo method) => throw new NotImplementedException(); + + public void enable_v2_dtype_behavior() => throw new NotImplementedException(); + + public void disable_v2_dtype_behavior() => throw new NotImplementedException(); + + public void v2_dtype_behavior_enabled() => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Engine/CompileUtils.cs b/src/TensorFlowNET.Keras/Engine/CompileUtils.cs new file mode 100644 index 00000000..0c054d64 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/CompileUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class 
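The `Combiner`/`CombinerPreprocessingLayer` pair above mirrors Keras' preprocessing design: `compute` folds each batch into an accumulator, `merge` joins accumulators produced by parallel workers, and `extract`/`serialize`/`deserialize` move that state in and out of layer weights. A hypothetical subclass, sketched only to show how the abstract contract fits together; the element-counting logic, the NumSharp `np.array` call, and the Newtonsoft.Json serialization are assumptions, not part of this patch:

```csharp
using System;
using System.Linq;
using Newtonsoft.Json;
using NumSharp;
using Tensorflow;
using Tensorflow.Keras.Engine;

// Hypothetical combiner that only counts elements it has seen.
public class CountingCombiner : Combiner
{
    // Fold one batch into the running accumulator (here: a boxed long).
    public override dynamic compute(NDArray[] batch_values, dynamic accumulator = null)
    {
        long count = accumulator == null ? 0L : (long)accumulator;
        foreach (var batch in batch_values)
            count += batch.size;
        return count;
    }

    // Combine partial accumulators produced on different replicas/shards.
    public override dynamic merge(dynamic[] accumulators)
        => accumulators.Sum(a => (long)a);

    // Expose the accumulated state as weights; np.array(float[]) is assumed here.
    public override NDArray[] extract(dynamic accumulator)
        => new[] { np.array(new[] { (float)(long)accumulator }) };

    // Rebuilding an accumulator from a restored Tensor is out of scope for this sketch.
    public override dynamic restore(Tensor output)
        => throw new NotImplementedException();

    public override string serialize(dynamic accumulator)
        => JsonConvert.SerializeObject((long)accumulator);

    public override dynamic deserialize(string encoded_accumulator)
        => JsonConvert.DeserializeObject<long>(encoded_accumulator);
}
```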
CompileUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapter.cs new file mode 100644 index 00000000..406b75bd --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/DataAdapter.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class DataAdapter + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/InputLayer.cs b/src/TensorFlowNET.Keras/Engine/InputLayer.cs new file mode 100644 index 00000000..3ed5f066 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/InputLayer.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class InputLayer + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/InputSpec.cs b/src/TensorFlowNET.Keras/Engine/InputSpec.cs new file mode 100644 index 00000000..7246cce0 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/InputSpec.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class InputSpec + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/Network.cs b/src/TensorFlowNET.Keras/Engine/Network.cs new file mode 100644 index 00000000..f9470f8b --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/Network.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class Network + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/Node.cs b/src/TensorFlowNET.Keras/Engine/Node.cs new file mode 100644 index 00000000..d74e98b6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/Node.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class Node + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/PartialBatchPaddingHandler.cs b/src/TensorFlowNET.Keras/Engine/PartialBatchPaddingHandler.cs new file mode 100644 index 00000000..422ae27e --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/PartialBatchPaddingHandler.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class PartialBatchPaddingHandler + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/Sequential.cs b/src/TensorFlowNET.Keras/Engine/Sequential.cs new file mode 100644 index 00000000..611ab18b --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/Sequential.cs @@ -0,0 +1,11 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class Sequential + { + + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrackableWeightHandler.cs b/src/TensorFlowNET.Keras/Engine/TrackableWeightHandler.cs new file mode 100644 index 00000000..c6305809 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrackableWeightHandler.cs @@ -0,0 +1,26 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class TrackableWeightHandler + { + public int num_tensors + { + get + { + throw new NotImplementedException(); + } + } + + public TrackableWeightHandler(bool trackable) + { + throw new NotImplementedException(); + } + + public void set_weights(Tensor[] weights) => throw new NotImplementedException(); + + public void _set_weights_v1(Tensor[] weights) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Engine/Training.cs 
b/src/TensorFlowNET.Keras/Engine/Training.cs new file mode 100644 index 00000000..64a9d5ba --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/Training.cs @@ -0,0 +1,24 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + public class Training + { + public class Model + { + + } + + public class _TrainingEndpoint + { + + } + + public class _TrainingTarget + { + + } + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingArrays.cs b/src/TensorFlowNET.Keras/Engine/TrainingArrays.cs new file mode 100644 index 00000000..ca340631 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingArrays.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingArrays + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingDistributed.cs b/src/TensorFlowNET.Keras/Engine/TrainingDistributed.cs new file mode 100644 index 00000000..3eef4c6c --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingDistributed.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingDistributed + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingEager.cs b/src/TensorFlowNET.Keras/Engine/TrainingEager.cs new file mode 100644 index 00000000..a697bdae --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingEager.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingEager + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingGenerator.cs b/src/TensorFlowNET.Keras/Engine/TrainingGenerator.cs new file mode 100644 index 00000000..5b241890 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingGenerator.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingGenerator + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingUtils.cs b/src/TensorFlowNET.Keras/Engine/TrainingUtils.cs new file mode 100644 index 00000000..913fa688 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingV1.cs b/src/TensorFlowNET.Keras/Engine/TrainingV1.cs new file mode 100644 index 00000000..7dee23ea --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingV1.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingV1 + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingV2.cs b/src/TensorFlowNET.Keras/Engine/TrainingV2.cs new file mode 100644 index 00000000..47d11694 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Engine/TrainingV2Utils.cs b/src/TensorFlowNET.Keras/Engine/TrainingV2Utils.cs new file mode 100644 index 00000000..9122a005 --- /dev/null +++ b/src/TensorFlowNET.Keras/Engine/TrainingV2Utils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Engine +{ + class TrainingV2Utils + { + } +} diff --git 
a/src/TensorFlowNET.Keras/Estimator.cs b/src/TensorFlowNET.Keras/Estimator.cs new file mode 100644 index 00000000..fec0f8e5 --- /dev/null +++ b/src/TensorFlowNET.Keras/Estimator.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + class Estimator + { + } +} diff --git a/src/TensorFlowNET.Keras/IInitializer.cs b/src/TensorFlowNET.Keras/IInitializer.cs deleted file mode 100644 index d69e0d1b..00000000 --- a/src/TensorFlowNET.Keras/IInitializer.cs +++ /dev/null @@ -1,6 +0,0 @@ -namespace Keras -{ - interface IInitializer - { - } -} diff --git a/src/TensorFlowNET.Keras/Initializer/BaseInitializer.cs b/src/TensorFlowNET.Keras/Initializer/BaseInitializer.cs deleted file mode 100644 index cd3e473c..00000000 --- a/src/TensorFlowNET.Keras/Initializer/BaseInitializer.cs +++ /dev/null @@ -1,7 +0,0 @@ -namespace Keras.Initializer -{ - class BaseInitializer : IInitializer - { - public int seed; - } -} diff --git a/src/TensorFlowNET.Keras/Initializers/Constant.cs b/src/TensorFlowNET.Keras/Initializers/Constant.cs new file mode 100644 index 00000000..9d942100 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Constant.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class Constant + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/ConstantV2.cs b/src/TensorFlowNET.Keras/Initializers/ConstantV2.cs new file mode 100644 index 00000000..7622596c --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/ConstantV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class ConstantV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/GlorotNormal.cs b/src/TensorFlowNET.Keras/Initializers/GlorotNormal.cs new file mode 100644 index 00000000..47e84837 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/GlorotNormal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class GlorotNormal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/GlorotNormalV2.cs b/src/TensorFlowNET.Keras/Initializers/GlorotNormalV2.cs new file mode 100644 index 00000000..2c00cbdc --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/GlorotNormalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class GlorotNormalV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/GlorotUniform.cs b/src/TensorFlowNET.Keras/Initializers/GlorotUniform.cs new file mode 100644 index 00000000..f3d7d785 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/GlorotUniform.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class GlorotUniform + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/GlorotUniformV2.cs b/src/TensorFlowNET.Keras/Initializers/GlorotUniformV2.cs new file mode 100644 index 00000000..67d9a975 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/GlorotUniformV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class GlorotUniformV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/HeNormal.cs b/src/TensorFlowNET.Keras/Initializers/HeNormal.cs new file mode 100644 index 
00000000..1ec4b282 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/HeNormal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class HeNormal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/HeNormalV2.cs b/src/TensorFlowNET.Keras/Initializers/HeNormalV2.cs new file mode 100644 index 00000000..5450898b --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/HeNormalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class HeNormalV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/HeUniform.cs b/src/TensorFlowNET.Keras/Initializers/HeUniform.cs new file mode 100644 index 00000000..d07cf932 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/HeUniform.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class HeUniform + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/HeUniformV2.cs b/src/TensorFlowNET.Keras/Initializers/HeUniformV2.cs new file mode 100644 index 00000000..0dbcb678 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/HeUniformV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class HeUniformV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/Identity.cs b/src/TensorFlowNET.Keras/Initializers/Identity.cs new file mode 100644 index 00000000..178d70e5 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Identity.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class Identity + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/IdentityV2.cs b/src/TensorFlowNET.Keras/Initializers/IdentityV2.cs new file mode 100644 index 00000000..5955d41e --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/IdentityV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class IdentityV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/Initializer.cs b/src/TensorFlowNET.Keras/Initializers/Initializer.cs new file mode 100644 index 00000000..5a432be1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Initializer.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + public abstract class Initializer + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/InitializerV2.cs b/src/TensorFlowNET.Keras/Initializers/InitializerV2.cs new file mode 100644 index 00000000..638785d9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/InitializerV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class InitializerV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/LecunNormal.cs b/src/TensorFlowNET.Keras/Initializers/LecunNormal.cs new file mode 100644 index 00000000..a810dfa8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/LecunNormal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class LecunNormal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/LecunNormalV2.cs 
b/src/TensorFlowNET.Keras/Initializers/LecunNormalV2.cs new file mode 100644 index 00000000..5010ddde --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/LecunNormalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class LecunNormalV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/LecunUniform.cs b/src/TensorFlowNET.Keras/Initializers/LecunUniform.cs new file mode 100644 index 00000000..96bfb4d4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/LecunUniform.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class LecunUniform + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/LecunUniformV2.cs b/src/TensorFlowNET.Keras/Initializers/LecunUniformV2.cs new file mode 100644 index 00000000..0eb24dd1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/LecunUniformV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class LecunUniformV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/Ones.cs b/src/TensorFlowNET.Keras/Initializers/Ones.cs new file mode 100644 index 00000000..e30399bb --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Ones.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class Ones + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/OnesV2.cs b/src/TensorFlowNET.Keras/Initializers/OnesV2.cs new file mode 100644 index 00000000..18b6ee9a --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/OnesV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class OnesV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/Orthogonal.cs b/src/TensorFlowNET.Keras/Initializers/Orthogonal.cs new file mode 100644 index 00000000..984d986b --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Orthogonal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class Orthogonal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/OrthogonalV2.cs b/src/TensorFlowNET.Keras/Initializers/OrthogonalV2.cs new file mode 100644 index 00000000..eedddeb7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/OrthogonalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class OrthogonalV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/RandomNormal.cs b/src/TensorFlowNET.Keras/Initializers/RandomNormal.cs new file mode 100644 index 00000000..0efe8cb9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/RandomNormal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class RandomNormal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/RandomNormalV2.cs b/src/TensorFlowNET.Keras/Initializers/RandomNormalV2.cs new file mode 100644 index 00000000..e1bd3606 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/RandomNormalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class RandomNormalV2 + { + } +} diff --git 
a/src/TensorFlowNET.Keras/Initializers/RandomUniform.cs b/src/TensorFlowNET.Keras/Initializers/RandomUniform.cs new file mode 100644 index 00000000..4547957e --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/RandomUniform.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class RandomUniform + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/RandomUniformV2.cs b/src/TensorFlowNET.Keras/Initializers/RandomUniformV2.cs new file mode 100644 index 00000000..678c27d0 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/RandomUniformV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class RandomUniformV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/TruncatedNormal.cs b/src/TensorFlowNET.Keras/Initializers/TruncatedNormal.cs new file mode 100644 index 00000000..2ba845d8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/TruncatedNormal.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class TruncatedNormal + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/TruncatedNormalV2.cs b/src/TensorFlowNET.Keras/Initializers/TruncatedNormalV2.cs new file mode 100644 index 00000000..2b90b396 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/TruncatedNormalV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class TruncatedNormalV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/VarianceScaling.cs b/src/TensorFlowNET.Keras/Initializers/VarianceScaling.cs new file mode 100644 index 00000000..7d09e46a --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/VarianceScaling.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class VarianceScaling + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/VarianceScalingV2.cs b/src/TensorFlowNET.Keras/Initializers/VarianceScalingV2.cs new file mode 100644 index 00000000..d9fd9f23 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/VarianceScalingV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class VarianceScalingV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/Zeros.cs b/src/TensorFlowNET.Keras/Initializers/Zeros.cs new file mode 100644 index 00000000..dd976c88 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/Zeros.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class Zeros + { + } +} diff --git a/src/TensorFlowNET.Keras/Initializers/ZerosV2.cs b/src/TensorFlowNET.Keras/Initializers/ZerosV2.cs new file mode 100644 index 00000000..00da7715 --- /dev/null +++ b/src/TensorFlowNET.Keras/Initializers/ZerosV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Initializers +{ + class ZerosV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Keras.Core.csproj b/src/TensorFlowNET.Keras/Keras.Core.csproj deleted file mode 100644 index 5c3895d1..00000000 --- a/src/TensorFlowNET.Keras/Keras.Core.csproj +++ /dev/null @@ -1,13 +0,0 @@ - - - - netstandard2.0 - Keras - Keras - - - - - - - diff --git 
a/src/TensorFlowNET.Keras/KerasParameterized.cs b/src/TensorFlowNET.Keras/KerasParameterized.cs new file mode 100644 index 00000000..f5d65541 --- /dev/null +++ b/src/TensorFlowNET.Keras/KerasParameterized.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + class KerasParameterized + { + } +} diff --git a/src/TensorFlowNET.Keras/KwArgs.cs b/src/TensorFlowNET.Keras/KwArgs.cs new file mode 100644 index 00000000..11a90dd8 --- /dev/null +++ b/src/TensorFlowNET.Keras/KwArgs.cs @@ -0,0 +1,43 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class KwArgs + { + private Dictionary<string, object> args = new Dictionary<string, object>(); + + public object this[string name] + { + get + { + return args.ContainsKey(name) ? args[name] : null; + } + set + { + args[name] = value; + } + } + + public T Get<T>(string name) + { + if (!args.ContainsKey(name)) + return default(T); + + return (T)args[name]; + } + + public static explicit operator KwArgs(ValueTuple<string, object>[] p) + { + KwArgs kwArgs = new KwArgs(); + kwArgs.args = new Dictionary<string, object>(); + foreach (var item in p) + { + kwArgs.args[item.Item1] = item.Item2; + } + + return kwArgs; + } + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ELU.cs b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ELU.cs new file mode 100644 index 00000000..bf8e7c90 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ELU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class ELU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/LeakyReLU.cs b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/LeakyReLU.cs new file mode 100644 index 00000000..d56203a2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/LeakyReLU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class LeakyReLU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/PReLU.cs b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/PReLU.cs new file mode 100644 index 00000000..7cb2e20c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/PReLU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class PReLU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ReLU.cs b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ReLU.cs new file mode 100644 index 00000000..77ee3994 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ReLU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class ReLU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/Softmax.cs b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/Softmax.cs new file mode 100644 index 00000000..694e75a7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/Softmax.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class Softmax + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ThresholdedReLU.cs 
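`KwArgs`, added above, is a small keyword-argument bag: values go in through the indexer or via the explicit conversion from an array of `(name, value)` tuples, and come back out through the typed `Get<T>` helper. A short usage sketch; the argument names used here (`units`, `activation`, `use_bias`) are placeholders, not anything the patch defines:

```csharp
using System;
using Tensorflow.Keras;

class KwArgsSketch
{
    static void Main()
    {
        // The explicit operator builds a KwArgs from (name, value) pairs.
        var kwargs = (KwArgs)new[]
        {
            ("units", (object)128),
            ("activation", (object)"relu"),
        };

        kwargs["use_bias"] = false;             // indexer assigns or overwrites
        int units = kwargs.Get<int>("units");   // typed lookup with unboxing
        object missing = kwargs["momentum"];    // unknown keys come back as null

        Console.WriteLine($"units={units}, missing is null: {missing == null}");
    }
}
```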
b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ThresholdedReLU.cs new file mode 100644 index 00000000..a5b849ca --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/AdvancedActivations/ThresholdedReLU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.AdvancedActivations +{ + class ThresholdedReLU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv.cs new file mode 100644 index 00000000..f7e6950f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv1D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv1D.cs new file mode 100644 index 00000000..91c1a987 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2D.cs new file mode 100644 index 00000000..a82f89eb --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2DTranspose.cs new file mode 100644 index 00000000..2c16bc98 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv2DTranspose.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv2DTranspose + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3D.cs new file mode 100644 index 00000000..4177dbed --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3DTranspose.cs new file mode 100644 index 00000000..1537d48e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Conv3DTranspose.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Conv3DTranspose + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping1D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping1D.cs new file mode 100644 index 00000000..5edfea70 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Cropping1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping2D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping2D.cs new file mode 100644 index 00000000..e3f99bfd --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping2D.cs @@ -0,0 +1,10 @@ +using System; 
+using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Cropping2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping3D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping3D.cs new file mode 100644 index 00000000..e702cfef --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/Cropping3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Cropping3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Convolutional/DepthwiseConv2D.cs b/src/TensorFlowNET.Keras/Layers/Convolutional/DepthwiseConv2D.cs new file mode 100644 index 00000000..53e9271d --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Convolutional/DepthwiseConv2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class DepthwiseConv2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2D.cs b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2D.cs new file mode 100644 index 00000000..f8d27d27 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class ConvLSTM2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2DCell.cs b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2DCell.cs new file mode 100644 index 00000000..86195574 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvLSTM2DCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class ConvLSTM2DCell + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvRNN2D.cs b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvRNN2D.cs new file mode 100644 index 00000000..420c2444 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/ConvolutionalRecurrent/ConvRNN2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class ConvRNN2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Activation.cs b/src/TensorFlowNET.Keras/Layers/Core/Activation.cs new file mode 100644 index 00000000..03f4e8f1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Activation.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Activation + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/ActivityRegularization.cs b/src/TensorFlowNET.Keras/Layers/Core/ActivityRegularization.cs new file mode 100644 index 00000000..d88d53d5 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/ActivityRegularization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class ActivityRegularization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs similarity index 95% rename from src/TensorFlowNET.Keras/Layers/Dense.cs rename to src/TensorFlowNET.Keras/Layers/Core/Dense.cs index 893dbc10..47ec17cf 100644 --- a/src/TensorFlowNET.Keras/Layers/Dense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs @@ -23,7 +23,7 @@ using static Tensorflow.Binding; namespace 
Keras.Layers { - public class Dense : ILayer + public class Dense : Layer { RefVariable W; int units; @@ -37,7 +37,7 @@ namespace Keras.Layers this.units = units; this.name = (string.IsNullOrEmpty(name) || string.IsNullOrWhiteSpace(name))?this.GetType().Name + "_" + this.GetType().GUID:name; } - public ILayer __build__(TensorShape input_shape, int seed = 1, float stddev = -1f) + public Layer __build__(TensorShape input_shape, int seed = 1, float stddev = -1f) { Console.WriteLine("Building Layer \"" + name + "\" ..."); if (stddev == -1) diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dropout.cs b/src/TensorFlowNET.Keras/Layers/Core/Dropout.cs new file mode 100644 index 00000000..c75a9573 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Dropout.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Dropout + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Flatten.cs b/src/TensorFlowNET.Keras/Layers/Core/Flatten.cs new file mode 100644 index 00000000..f6e716f4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Flatten.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Flatten + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Lambda.cs b/src/TensorFlowNET.Keras/Layers/Core/Lambda.cs new file mode 100644 index 00000000..d0511b99 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Lambda.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Lambda + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Masking.cs b/src/TensorFlowNET.Keras/Layers/Core/Masking.cs new file mode 100644 index 00000000..373d77ee --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Masking.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Masking + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Permute.cs b/src/TensorFlowNET.Keras/Layers/Core/Permute.cs new file mode 100644 index 00000000..fa70caad --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Permute.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Permute + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/RepeatVector.cs b/src/TensorFlowNET.Keras/Layers/Core/RepeatVector.cs new file mode 100644 index 00000000..e1af963c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/RepeatVector.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class RepeatVector + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/Reshape.cs b/src/TensorFlowNET.Keras/Layers/Core/Reshape.cs new file mode 100644 index 00000000..c0d5c00f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/Reshape.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Reshape + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout1D.cs b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout1D.cs new file mode 100644 index 00000000..3b3c59de --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace 
Tensorflow.Keras.Layers +{ + class SpatialDropout1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout2D.cs b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout2D.cs new file mode 100644 index 00000000..639854f4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class SpatialDropout2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout3D.cs b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout3D.cs new file mode 100644 index 00000000..b76abc38 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Core/SpatialDropout3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class SpatialDropout3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNGRU.cs b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNGRU.cs new file mode 100644 index 00000000..5858b3ec --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNGRU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class CuDNNGRU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNLSTM.cs b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNLSTM.cs new file mode 100644 index 00000000..dc5ff973 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/CuDNNLSTM.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class CuDNNLSTM + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/_CuDNNRNN.cs b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/_CuDNNRNN.cs new file mode 100644 index 00000000..93d87934 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/CuDnnRecurrent/_CuDNNRNN.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class _CuDNNRNN + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/DenseAttention/AdditiveAttention.cs b/src/TensorFlowNET.Keras/Layers/DenseAttention/AdditiveAttention.cs new file mode 100644 index 00000000..d30a2e79 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/DenseAttention/AdditiveAttention.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AdditiveAttention + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/DenseAttention/Attention.cs b/src/TensorFlowNET.Keras/Layers/DenseAttention/Attention.cs new file mode 100644 index 00000000..31287bfc --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/DenseAttention/Attention.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Attention + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/DenseAttention/BaseDenseAttention.cs b/src/TensorFlowNET.Keras/Layers/DenseAttention/BaseDenseAttention.cs new file mode 100644 index 00000000..94ec5191 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/DenseAttention/BaseDenseAttention.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class BaseDenseAttention + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/ILayer.cs b/src/TensorFlowNET.Keras/Layers/ILayer.cs deleted file mode 
100644 index 9148ca24..00000000 --- a/src/TensorFlowNET.Keras/Layers/ILayer.cs +++ /dev/null @@ -1,12 +0,0 @@ -using Tensorflow; - -namespace Keras.Layers -{ - public interface ILayer - { - TensorShape __shape__(); - ILayer __build__(TensorShape input_shape, int seed = 1, float stddev = -1f); - Tensor __call__(Tensor x); - TensorShape output_shape(TensorShape input_shape); - } -} diff --git a/src/TensorFlowNET.Keras/Layers/Kernelized.cs b/src/TensorFlowNET.Keras/Layers/Kernelized.cs new file mode 100644 index 00000000..94f45d66 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Kernelized.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Kernelized + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Layer.cs b/src/TensorFlowNET.Keras/Layers/Layer.cs new file mode 100644 index 00000000..eb231fad --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Layer.cs @@ -0,0 +1,422 @@ +using NumSharp; +using System; +using System.Collections.Generic; +using Tensorflow; +using Tensorflow.Keras.Constraints; +using Tensorflow.Keras.Initializers; +using Tensorflow.Keras.Losses; +using Tensorflow.Keras.Metrics; +using Tensorflow.Keras.Regularizers; + +namespace Keras.Layers +{ + public abstract class Layer + { + public TF_DataType dtype + { + get + { + throw new NotImplementedException(); + } + } + + public string name + { + get + { + throw new NotImplementedException(); + } + } + + public bool stateful + { + get + { + throw new NotImplementedException(); + } + set + { + throw new NotImplementedException(); + } + } + + public bool trainable + { + get + { + throw new NotImplementedException(); + } + } + + public Regularizer activity_regularizer + { + get + { + throw new NotImplementedException(); + } + set + { + throw new NotImplementedException(); + } + } + + public dynamic input_spec + { + get + { + throw new NotImplementedException(); + } + set + { + throw new NotImplementedException(); + } + } + + public Tensor[] trainable_weights + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] non_trainable_weights + { + get + { + throw new NotImplementedException(); + } + } + + private Tensor[] _weights + { + get + { + throw new NotImplementedException(); + } + } + + public Func[] updates + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] losses + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] metrics + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] input_mask + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] output_mask + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] input + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] output + { + get + { + throw new NotImplementedException(); + } + } + + public TensorShape[] input_shape + { + get + { + throw new NotImplementedException(); + } + } + + public TensorShape[] output_shape + { + get + { + throw new NotImplementedException(); + } + } + + public Tensor[] variables + { + get + { + return _weights; + } + } + + public Tensor[] trainable_variables + { + get + { + return trainable_weights; + } + } + + public Tensor[] non_trainable_variables + { + get + { + return non_trainable_weights; + } + } + + private string _compute_dtype + { + get + { + throw new NotImplementedException(); + } + } + + public Layer(bool trainable = true, string name = null, string dtype = null, 
bool @dynamic = false, Dictionary kwargs = null) + { + + } + + public void build(TensorShape shape) => throw new NotImplementedException(); + + public virtual void call(Tensor[] inputs) => throw new NotImplementedException(); + + public void _add_trackable(dynamic trackable_object, bool trainable) => throw new NotImplementedException(); + + public void add_weight(string name= null, TensorShape shape= null, string dtype= null, Initializer initializer = null, + Regularizer regularizer = null, bool? trainable = null, ConstraintBase constraint = null, + dynamic partitioner= null, bool? use_resource= null, VariableSynchronization synchronization= VariableSynchronization.Auto, + VariableAggregation aggregation= VariableAggregation.None, Dictionary kwargs = null) => throw new NotImplementedException(); + + public virtual Dictionary get_config() => throw new NotImplementedException(); + + public Layer from_config(Dictionary config) => throw new NotImplementedException(); + + public TensorShape compute_output_shape(TensorShape input_shape) => throw new NotImplementedException(); + + public dynamic compute_output_signature(dynamic input_signature) => throw new NotImplementedException(); + + public Tensor[] compute_mask(Tensor[] inputs, Tensor[] mask = null) => throw new NotImplementedException(); + + public void __call__(Tensor[] inputs) => throw new NotImplementedException(); + + public void add_loss(Loss[] losses, Tensor[] inputs = null) => throw new NotImplementedException(); + + public void _clear_losses() => throw new NotImplementedException(); + + public void add_metric(Tensor value, string aggregation= null, string name= null) => throw new NotImplementedException(); + + public void add_update(Func[] updates) => throw new NotImplementedException(); + + public void set_weights(NDArray[] weights) => throw new NotImplementedException(); + + public NDArray[] get_weights() => throw new NotImplementedException(); + + public Func[] get_updates_for(Tensor[] inputs) => throw new NotImplementedException(); + + public Tensor[] get_losses_for(Tensor[] inputs) => throw new NotImplementedException(); + + public Tensor[] get_input_mask_at(int node_index) => throw new NotImplementedException(); + + public Tensor[] get_output_mask_at(int node_index) => throw new NotImplementedException(); + + public TensorShape[] get_input_shape_at(int node_index) => throw new NotImplementedException(); + + public TensorShape[] get_output_shape_at(int node_index) => throw new NotImplementedException(); + + public Tensor[] get_input_at(int node_index) => throw new NotImplementedException(); + + public Tensor[] get_output_at(int node_index) => throw new NotImplementedException(); + + public int count_params() => throw new NotImplementedException(); + + private void _set_dtype_policy(string dtype) => throw new NotImplementedException(); + + private Tensor _maybe_cast_inputs(Tensor inputs) => throw new NotImplementedException(); + + private void _warn_about_input_casting(string input_dtype) => throw new NotImplementedException(); + + private string _name_scope() + { + return name; + } + + private string _obj_reference_counts + { + get + { + throw new NotImplementedException(); + } + } + + private dynamic _attribute_sentinel + { + get + { + throw new NotImplementedException(); + } + } + + private dynamic _call_full_argspec + { + get + { + throw new NotImplementedException(); + } + } + + private string[] _call_fn_args + { + get + { + throw new NotImplementedException(); + } + } + + private string[] _call_accepts_kwargs + { + get + { 
+ throw new NotImplementedException(); + } + } + + private bool _should_compute_mask + { + get + { + throw new NotImplementedException(); + } + } + + private Tensor[] _eager_losses + { + get + { + throw new NotImplementedException(); + } + set + { + throw new NotImplementedException(); + } + } + + private dynamic _trackable_saved_model_saver + { + get + { + throw new NotImplementedException(); + } + } + + private string _object_identifier + { + get + { + throw new NotImplementedException(); + } + } + + private string _tracking_metadata + { + get + { + throw new NotImplementedException(); + } + } + + public Dictionary state + { + get + { + throw new NotImplementedException(); + } + set + { + throw new NotImplementedException(); + } + } + + private void _init_set_name(string name, bool zero_based= true) => throw new NotImplementedException(); + + private Metric _get_existing_metric(string name = null) => throw new NotImplementedException(); + + private void _eager_add_metric(Metric value, string aggregation= null, string name= null) => throw new NotImplementedException(); + + private void _symbolic_add_metric(Metric value, string aggregation = null, string name = null) => throw new NotImplementedException(); + + private void _handle_weight_regularization(string name, VariableV1 variable, Regularizer regularizer) => throw new NotImplementedException(); + + private void _handle_activity_regularization(Tensor[] inputs, Tensor[] outputs) => throw new NotImplementedException(); + + private void _set_mask_metadata(Tensor[] inputs, Tensor[] outputs, Tensor previous_mask) => throw new NotImplementedException(); + + private Tensor[] _collect_input_masks(Tensor[] inputs, Dictionary args, Dictionary kwargs) => throw new NotImplementedException(); + + private bool _call_arg_was_passed(string arg_name, Dictionary args, Dictionary kwargs, bool inputs_in_args= false) => throw new NotImplementedException(); + + private T _get_call_arg_value(string arg_name, Dictionary args, Dictionary kwargs, bool inputs_in_args = false) => throw new NotImplementedException(); + + private (Tensor[], Tensor[]) _set_connectivity_metadata_(Tensor[] inputs, Tensor[] outputs, Dictionary args, Dictionary kwargs) => throw new NotImplementedException(); + + private void _add_inbound_node(Tensor[] input_tensors, Tensor[] output_tensors, Dictionary args = null) => throw new NotImplementedException(); + + private AttrValue _get_node_attribute_at_index(int node_index, string attr, string attr_name) => throw new NotImplementedException(); + + private void _maybe_build(Tensor[] inputs) => throw new NotImplementedException(); + + private void _symbolic_call(Tensor[] inputs) => throw new NotImplementedException(); + + private Dictionary _get_trainable_state() => throw new NotImplementedException(); + + private void _set_trainable_state(bool trainable_state) => throw new NotImplementedException(); + + private void _maybe_create_attribute(string name, object default_value) => throw new NotImplementedException(); + + private void __delattr__(string name) => throw new NotImplementedException(); + + private void __setattr__(string name, object value) => throw new NotImplementedException(); + + private List _gather_children_attribute(string attribute) => throw new NotImplementedException(); + + private List _gather_unique_layers() => throw new NotImplementedException(); + + private List _gather_layers() => throw new NotImplementedException(); + + private bool _is_layer() => throw new NotImplementedException(); + + private void 
_init_call_fn_args() => throw new NotImplementedException(); + + public dynamic _list_extra_dependencies_for_serialization(dynamic serialization_cache) => throw new NotImplementedException(); + + public dynamic _list_functions_for_serialization(dynamic serialization_cache) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Local/Local.cs b/src/TensorFlowNET.Keras/Layers/Local/Local.cs new file mode 100644 index 00000000..e7920fdd --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Local/Local.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Local + { + + } + + +} diff --git a/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected1D.cs b/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected1D.cs new file mode 100644 index 00000000..aa5eb8c1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LocallyConnected1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected2D.cs b/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected2D.cs new file mode 100644 index 00000000..0b3cb2fa --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Local/LocallyConnected2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LocallyConnected2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Add.cs b/src/TensorFlowNET.Keras/Layers/Merge/Add.cs new file mode 100644 index 00000000..c2f7805a --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Add.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Add + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Average.cs b/src/TensorFlowNET.Keras/Layers/Merge/Average.cs new file mode 100644 index 00000000..89f41824 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Average.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Average + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Concatenate.cs b/src/TensorFlowNET.Keras/Layers/Merge/Concatenate.cs new file mode 100644 index 00000000..842f25d4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Concatenate.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Concatenate + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Dot.cs b/src/TensorFlowNET.Keras/Layers/Merge/Dot.cs new file mode 100644 index 00000000..ac339f67 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Dot.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Dot + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Maximum.cs b/src/TensorFlowNET.Keras/Layers/Merge/Maximum.cs new file mode 100644 index 00000000..862d100f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Maximum.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Maximum + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Merge.cs b/src/TensorFlowNET.Keras/Layers/Merge/Merge.cs new file mode 100644 
index 00000000..3e0d80c2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Merge.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Merge + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Minimum.cs b/src/TensorFlowNET.Keras/Layers/Merge/Minimum.cs new file mode 100644 index 00000000..1030a4aa --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Minimum.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Minimum + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Multiply.cs b/src/TensorFlowNET.Keras/Layers/Merge/Multiply.cs new file mode 100644 index 00000000..21b66d3d --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Multiply.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Multiply + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Merge/Subtract.cs b/src/TensorFlowNET.Keras/Layers/Merge/Subtract.cs new file mode 100644 index 00000000..d0aca561 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Merge/Subtract.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Subtract + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Noise/AlphaDropout.cs b/src/TensorFlowNET.Keras/Layers/Noise/AlphaDropout.cs new file mode 100644 index 00000000..3fe38afc --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Noise/AlphaDropout.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AlphaDropout + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Noise/GaussianDropout.cs b/src/TensorFlowNET.Keras/Layers/Noise/GaussianDropout.cs new file mode 100644 index 00000000..4a272eb9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Noise/GaussianDropout.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GaussianDropout + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Noise/GaussianNoise.cs b/src/TensorFlowNET.Keras/Layers/Noise/GaussianNoise.cs new file mode 100644 index 00000000..fa944cde --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Noise/GaussianNoise.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GaussianNoise + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs new file mode 100644 index 00000000..4e0b70ea --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class BatchNormalization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationBase.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationBase.cs new file mode 100644 index 00000000..82b7764e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationBase.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class BatchNormalizationBase + { + } +} diff --git 
a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationV2.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationV2.cs new file mode 100644 index 00000000..32eac199 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalizationV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class BatchNormalizationV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs new file mode 100644 index 00000000..ae8b5d0e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LayerNormalization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling1D.cs new file mode 100644 index 00000000..3081a32d --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AveragePooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling2D.cs new file mode 100644 index 00000000..0265353e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AveragePooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling3D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling3D.cs new file mode 100644 index 00000000..e16f204f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/AveragePooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AveragePooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Embedding.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Embedding.cs new file mode 100644 index 00000000..66937742 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Embedding.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Embedding + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs new file mode 100644 index 00000000..4ba5b395 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalAveragePooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs new file mode 100644 index 00000000..44cad231 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalAveragePooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling3D.cs 
b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling3D.cs new file mode 100644 index 00000000..f6fc8572 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalAveragePooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs new file mode 100644 index 00000000..0df982b9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalMaxPooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs new file mode 100644 index 00000000..1cf9947a --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalMaxPooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling3D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling3D.cs new file mode 100644 index 00000000..373b30fb --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalMaxPooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling1D.cs new file mode 100644 index 00000000..fc125111 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalPooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling2D.cs new file mode 100644 index 00000000..6cc61151 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalPooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling3D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling3D.cs new file mode 100644 index 00000000..d4b2533c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalPooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GlobalPooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling1D.cs new file mode 100644 index 00000000..6dad38f9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class MaxPooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling2D.cs new file mode 100644 index 00000000..886934f8 --- /dev/null +++ 
b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class MaxPooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling3D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling3D.cs new file mode 100644 index 00000000..8660959e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/MaxPooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class MaxPooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs new file mode 100644 index 00000000..ddc61f6b --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Pooling1D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs new file mode 100644 index 00000000..47c2c60a --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Pooling2D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling3D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling3D.cs new file mode 100644 index 00000000..610139f7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling3D.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Pooling3D + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Processing/CategoryLookup.cs b/src/TensorFlowNET.Keras/Layers/Processing/CategoryLookup.cs new file mode 100644 index 00000000..6fb1191f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/CategoryLookup.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class CategoryLookup + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Processing/ImagePreprocessing.cs b/src/TensorFlowNET.Keras/Layers/Processing/ImagePreprocessing.cs new file mode 100644 index 00000000..debcfe45 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/ImagePreprocessing.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class ImagePreprocessing + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Processing/Normalization.cs b/src/TensorFlowNET.Keras/Layers/Processing/Normalization.cs new file mode 100644 index 00000000..07bf2dd6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/Normalization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class Normalization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Processing/NormalizationV1.cs b/src/TensorFlowNET.Keras/Layers/Processing/NormalizationV1.cs new file mode 100644 index 00000000..0c54ecc9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/NormalizationV1.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class NormalizationV1 + { + } +} diff 
--git a/src/TensorFlowNET.Keras/Layers/Processing/TextVectorization.cs b/src/TensorFlowNET.Keras/Layers/Processing/TextVectorization.cs new file mode 100644 index 00000000..21b5f334 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/TextVectorization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class TextVectorization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Processing/TextVectorizationV1.cs b/src/TensorFlowNET.Keras/Layers/Processing/TextVectorizationV1.cs new file mode 100644 index 00000000..07fac27c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Processing/TextVectorizationV1.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers.Processing +{ + class TextVectorizationV1 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DeviceWrapper.cs b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DeviceWrapper.cs new file mode 100644 index 00000000..2754ba2d --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DeviceWrapper.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class DeviceWrapper + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DropoutWrapper.cs b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DropoutWrapper.cs new file mode 100644 index 00000000..10f310b1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/DropoutWrapper.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class DropoutWrapper + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/ResidualWrapper.cs b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/ResidualWrapper.cs new file mode 100644 index 00000000..71d31d17 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/ResidualWrapper.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class ResidualWrapper + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/_RNNCellWrapperV2.cs b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/_RNNCellWrapperV2.cs new file mode 100644 index 00000000..db920f3b --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/RNNCellWrapper/_RNNCellWrapperV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class _RNNCellWrapperV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/AbstractRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/AbstractRNNCell.cs new file mode 100644 index 00000000..87c2c1b1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/AbstractRNNCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class AbstractRNNCell + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/DropoutRNNCellMixin.cs new file mode 100644 index 00000000..7a666b95 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/DropoutRNNCellMixin.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class DropoutRNNCellMixin + { + } +} diff --git 
a/src/TensorFlowNET.Keras/Layers/Recurrent/GRU.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/GRU.cs new file mode 100644 index 00000000..5fe897da --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/GRU.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GRU + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCell.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCell.cs new file mode 100644 index 00000000..562b904e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GRUCell + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCellv2.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCellv2.cs new file mode 100644 index 00000000..47166e48 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUCellv2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GRUCellv2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/GRUv2.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUv2.cs new file mode 100644 index 00000000..1e218fd7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/GRUv2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class GRUv2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTM.cs new file mode 100644 index 00000000..6fa6814f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTM.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LSTM + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCell.cs new file mode 100644 index 00000000..e173281f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LSTMCell + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCellv2.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCellv2.cs new file mode 100644 index 00000000..241ed8d1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMCellv2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LSTMCellv2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMv2.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMv2.cs new file mode 100644 index 00000000..48b4abd7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/LSTMv2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class LSTMv2 + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/PeepholeLSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/PeepholeLSTMCell.cs new file mode 100644 index 00000000..b38d1d3c --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/PeepholeLSTMCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class PeepholeLSTMCell + { + } +} diff --git 
a/src/TensorFlowNET.Keras/Layers/Recurrent/RNN.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/RNN.cs new file mode 100644 index 00000000..b5ebc14d --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/RNN.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class RNN + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNN.cs new file mode 100644 index 00000000..431049db --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNN.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class SimpleRNN + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNNCell.cs new file mode 100644 index 00000000..0b7fe9e3 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/SimpleRNNCell.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class SimpleRNNCell + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Recurrent/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Recurrent/StackedRNNCells.cs new file mode 100644 index 00000000..e609c3f4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Recurrent/StackedRNNCells.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class StackedRNNCells + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Wrapper/Bidirectional.cs b/src/TensorFlowNET.Keras/Layers/Wrapper/Bidirectional.cs new file mode 100644 index 00000000..d60f8f6f --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Wrapper/Bidirectional.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Bidirectional + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Wrapper/Serialization.cs b/src/TensorFlowNET.Keras/Layers/Wrapper/Serialization.cs new file mode 100644 index 00000000..8bae368e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Wrapper/Serialization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Serialization + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Wrapper/TimeDistributed.cs b/src/TensorFlowNET.Keras/Layers/Wrapper/TimeDistributed.cs new file mode 100644 index 00000000..07ff1f6e --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Wrapper/TimeDistributed.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class TimeDistributed + { + } +} diff --git a/src/TensorFlowNET.Keras/Layers/Wrapper/Wrapper.cs b/src/TensorFlowNET.Keras/Layers/Wrapper/Wrapper.cs new file mode 100644 index 00000000..9b330b33 --- /dev/null +++ b/src/TensorFlowNET.Keras/Layers/Wrapper/Wrapper.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Layers +{ + class Wrapper + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs new file mode 100644 index 00000000..20eb319e --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/BinaryCrossentropy.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + 
+namespace Tensorflow.Keras.Losses +{ + class BinaryCrossentropy + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs new file mode 100644 index 00000000..2afbb862 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/CategoricalCrossentropy.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class CategoricalCrossentropy + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/CategoricalHinge.cs b/src/TensorFlowNET.Keras/Losses/CategoricalHinge.cs new file mode 100644 index 00000000..e93934a2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/CategoricalHinge.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class CategoricalHinge + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs b/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs new file mode 100644 index 00000000..6411d34e --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/CosineSimilarity.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class CosineSimilarity + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/Hinge.cs b/src/TensorFlowNET.Keras/Losses/Hinge.cs new file mode 100644 index 00000000..88f90ef0 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/Hinge.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class Hinge + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/Huber.cs b/src/TensorFlowNET.Keras/Losses/Huber.cs new file mode 100644 index 00000000..54fa95cd --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/Huber.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class Huber + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/KLDivergence.cs b/src/TensorFlowNET.Keras/Losses/KLDivergence.cs new file mode 100644 index 00000000..7cda8b66 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/KLDivergence.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class KLDivergence + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/LogCosh.cs b/src/TensorFlowNET.Keras/Losses/LogCosh.cs new file mode 100644 index 00000000..0aa52e16 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/LogCosh.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class LogCosh + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/Loss.cs b/src/TensorFlowNET.Keras/Losses/Loss.cs new file mode 100644 index 00000000..8acee5ba --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/Loss.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + public abstract class Loss + { + public static Tensor mean_squared_error(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor mean_absolute_error(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor mean_absolute_percentage_error(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor mean_squared_logarithmic_error(Tensor y_true, Tensor y_pred) => throw new 
NotImplementedException(); + + public static Tensor _maybe_convert_labels(Tensor y_true) => throw new NotImplementedException(); + + public static Tensor squared_hinge(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor hinge(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor categorical_hinge(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor huber_loss(Tensor y_true, Tensor y_pred, float delta = 1) => throw new NotImplementedException(); + + public static Tensor logcosh(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor categorical_crossentropy(Tensor y_true, Tensor y_pred, bool from_logits = false, float label_smoothing = 0) => throw new NotImplementedException(); + + public static Tensor sparse_categorical_crossentropy(Tensor y_true, Tensor y_pred, bool from_logits = false, float axis = -1) => throw new NotImplementedException(); + + public static Tensor binary_crossentropy(Tensor y_true, Tensor y_pred, bool from_logits = false, float label_smoothing = 0) => throw new NotImplementedException(); + + public static Tensor kullback_leibler_divergence(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor poisson(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor cosine_similarity(Tensor y_true, Tensor y_pred, int axis = -1) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs b/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs new file mode 100644 index 00000000..666760df --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/LossFunctionWrapper.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class LossFunctionWrapper + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs new file mode 100644 index 00000000..dbdbd790 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class MeanAbsoluteError + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs new file mode 100644 index 00000000..cff3e683 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class MeanAbsolutePercentageError + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs new file mode 100644 index 00000000..a76ae4cc --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class MeanSquaredError + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs new file mode 100644 index 00000000..d3b6c36c --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + 
+namespace Tensorflow.Keras.Losses +{ + class MeanSquaredLogarithmicError + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/Poisson.cs b/src/TensorFlowNET.Keras/Losses/Poisson.cs new file mode 100644 index 00000000..254f9949 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/Poisson.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class Poisson + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs new file mode 100644 index 00000000..00964a89 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class SparseCategoricalCrossentropy + { + } +} diff --git a/src/TensorFlowNET.Keras/Losses/SquaredHinge.cs b/src/TensorFlowNET.Keras/Losses/SquaredHinge.cs new file mode 100644 index 00000000..60d83ef0 --- /dev/null +++ b/src/TensorFlowNET.Keras/Losses/SquaredHinge.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Losses +{ + class SquaredHinge + { + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/AUC.cs b/src/TensorFlowNET.Keras/Metrics/AUC.cs new file mode 100644 index 00000000..c34f61c8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/AUC.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class AUC : Metric + { + public AUC(int num_thresholds= 200, string curve= "ROC", string summation_method= "interpolation", + string name= null, string dtype= null, float thresholds= 0.5f, + bool multi_label= false, Tensor label_weights= null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + private void _build(TensorShape shape) => throw new NotImplementedException(); + + public Tensor interpolate_pr_auc() => throw new NotImplementedException(); + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Accuracy.cs b/src/TensorFlowNET.Keras/Metrics/Accuracy.cs new file mode 100644 index 00000000..cb58ae91 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Accuracy.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Accuracy : MeanMetricWrapper + { + public Accuracy(string name = "accuracy", string dtype = null) + : base(Metric.accuracy, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/BinaryAccuracy.cs b/src/TensorFlowNET.Keras/Metrics/BinaryAccuracy.cs new file mode 100644 index 00000000..682ed236 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/BinaryAccuracy.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class BinaryAccuracy : MeanMetricWrapper + { + public BinaryAccuracy(string name = "binary_accuracy", string dtype = null, float threshold = 0.5f) + : base(Fn, name, dtype) + { + } + + 
internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Metric.binary_accuracy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/BinaryCrossentropy.cs b/src/TensorFlowNET.Keras/Metrics/BinaryCrossentropy.cs new file mode 100644 index 00000000..14ef73b9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/BinaryCrossentropy.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class BinaryCrossentropy : MeanMetricWrapper + { + public BinaryCrossentropy(string name = "binary_crossentropy", string dtype = null, bool from_logits = false, float label_smoothing = 0) + : base(Fn, name, dtype) + { + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Losses.Loss.binary_crossentropy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/CategoricalAccuracy.cs b/src/TensorFlowNET.Keras/Metrics/CategoricalAccuracy.cs new file mode 100644 index 00000000..64b31f64 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/CategoricalAccuracy.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class CategoricalAccuracy : MeanMetricWrapper + { + public CategoricalAccuracy(string name = "categorical_accuracy", string dtype = null) + : base(Metric.categorical_accuracy, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/CategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Metrics/CategoricalCrossentropy.cs new file mode 100644 index 00000000..c83bb5d5 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/CategoricalCrossentropy.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class CategoricalCrossentropy : MeanMetricWrapper + { + public CategoricalCrossentropy(string name = "categorical_crossentropy", string dtype = null, bool from_logits = false, float label_smoothing = 0) + : base(Fn, name, dtype) + { + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Losses.Loss.categorical_crossentropy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/CategoricalHinge.cs b/src/TensorFlowNET.Keras/Metrics/CategoricalHinge.cs new file mode 100644 index 00000000..1f82d725 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/CategoricalHinge.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class CategoricalHinge : MeanMetricWrapper + { + public CategoricalHinge(string name = "categorical_hinge", string dtype = null) + : base(Losses.Loss.categorical_hinge, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/CosineSimilarity.cs b/src/TensorFlowNET.Keras/Metrics/CosineSimilarity.cs new file mode 100644 index 00000000..abce27c8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/CosineSimilarity.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class CosineSimilarity : MeanMetricWrapper + { + public CosineSimilarity(string name = "cosine_similarity", string dtype = null, int axis = -1) + : base(Fn, name, dtype) + { + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Metric.cosine_proximity(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/FalseNegatives.cs 
b/src/TensorFlowNET.Keras/Metrics/FalseNegatives.cs new file mode 100644 index 00000000..fb27484e --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/FalseNegatives.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class FalseNegatives : _ConfusionMatrixConditionCount + { + public FalseNegatives(float thresholds = 0.5F, string name = null, string dtype = null) + : base(Utils.MetricsUtils.ConfusionMatrix.FALSE_NEGATIVES, thresholds, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/FalsePositives.cs b/src/TensorFlowNET.Keras/Metrics/FalsePositives.cs new file mode 100644 index 00000000..1b97e556 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/FalsePositives.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class FalsePositives : _ConfusionMatrixConditionCount + { + public FalsePositives(float thresholds = 0.5F, string name = null, string dtype = null) + : base(Utils.MetricsUtils.ConfusionMatrix.FALSE_POSITIVES, thresholds, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Hinge.cs b/src/TensorFlowNET.Keras/Metrics/Hinge.cs new file mode 100644 index 00000000..21ebe067 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Hinge.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Hinge : MeanMetricWrapper + { + public Hinge(string name = "hinge", string dtype = null) + : base(Losses.Loss.hinge, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/KLDivergence.cs b/src/TensorFlowNET.Keras/Metrics/KLDivergence.cs new file mode 100644 index 00000000..814b14ce --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/KLDivergence.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class KLDivergence : MeanMetricWrapper + { + public KLDivergence(string name = "kullback_leibler_divergence", string dtype = null) + : base(Losses.Loss.kullback_leibler_divergence, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/LogCoshError.cs b/src/TensorFlowNET.Keras/Metrics/LogCoshError.cs new file mode 100644 index 00000000..595f4aa7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/LogCoshError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class LogCoshError : MeanMetricWrapper + { + public LogCoshError(string name = "logcosh", string dtype = null) + : base(Losses.Loss.logcosh, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Mean.cs b/src/TensorFlowNET.Keras/Metrics/Mean.cs new file mode 100644 index 00000000..64b8b5db --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Mean.cs @@ -0,0 +1,15 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Mean : Reduce + { + public Mean(string name, string dtype = null) + : base(Reduction.MEAN, name, dtype) + { + } + + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanAbsoluteError.cs b/src/TensorFlowNET.Keras/Metrics/MeanAbsoluteError.cs new file mode 100644 index 00000000..c326a6dd --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanAbsoluteError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace 
Tensorflow.Keras.Metrics +{ + public class MeanAbsoluteError : MeanMetricWrapper + { + public MeanAbsoluteError(string name = "mean_absolute_error", string dtype = null) + : base(Losses.Loss.mean_absolute_error, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanAbsolutePercentageError.cs b/src/TensorFlowNET.Keras/Metrics/MeanAbsolutePercentageError.cs new file mode 100644 index 00000000..0c51a5be --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanAbsolutePercentageError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanAbsolutePercentageError : MeanMetricWrapper + { + public MeanAbsolutePercentageError(string name = "mean_absolute_percentage_error", string dtype = null) + : base(Losses.Loss.mean_absolute_percentage_error, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanIoU.cs b/src/TensorFlowNET.Keras/Metrics/MeanIoU.cs new file mode 100644 index 00000000..d8975218 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanIoU.cs @@ -0,0 +1,34 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanIoU : Metric + { + public MeanIoU(int num_classes, string name, string dtype) : base(name, dtype) + { + } + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanMetricWrapper.cs b/src/TensorFlowNET.Keras/Metrics/MeanMetricWrapper.cs new file mode 100644 index 00000000..ccc7922b --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanMetricWrapper.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanMetricWrapper : Mean + { + public MeanMetricWrapper(Func<Tensor, Tensor, Tensor> fn, string name, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanRelativeError.cs b/src/TensorFlowNET.Keras/Metrics/MeanRelativeError.cs new file mode 100644 index 00000000..9ae76a6a --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanRelativeError.cs @@ -0,0 +1,30 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanRelativeError : Metric + { + public MeanRelativeError(Tensor normalizer, string name, string dtype) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanSquaredError.cs b/src/TensorFlowNET.Keras/Metrics/MeanSquaredError.cs new file mode 100644 index 00000000..e23b0f41 
--- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanSquaredError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanSquaredError : MeanMetricWrapper + { + public MeanSquaredError(string name = "mean_squared_error", string dtype = null) + : base(Losses.Loss.mean_squared_error, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanSquaredLogarithmicError.cs b/src/TensorFlowNET.Keras/Metrics/MeanSquaredLogarithmicError.cs new file mode 100644 index 00000000..9f56b9d8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanSquaredLogarithmicError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanSquaredLogarithmicError : MeanMetricWrapper + { + public MeanSquaredLogarithmicError(string name = "mean_squared_logarithmic_error", string dtype = null) + : base(Losses.Loss.mean_squared_logarithmic_error, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/MeanTensor.cs b/src/TensorFlowNET.Keras/Metrics/MeanTensor.cs new file mode 100644 index 00000000..114329b1 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/MeanTensor.cs @@ -0,0 +1,47 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class MeanTensor : Metric + { + public int total + { + get + { + throw new NotImplementedException(); + } + } + + public int count + { + get + { + throw new NotImplementedException(); + } + } + + public MeanTensor(int num_classes, string name = "mean_tensor", string dtype = null) : base(name, dtype) + { + } + + + private void _build(TensorShape shape) => throw new NotImplementedException(); + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Metric.cs b/src/TensorFlowNET.Keras/Metrics/Metric.cs new file mode 100644 index 00000000..10a3676b --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Metric.cs @@ -0,0 +1,63 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public abstract class Metric : Layers.Layer + { + public string dtype + { + get + { + throw new NotImplementedException(); + } + } + + public Metric(string name, string dtype) + { + throw new NotImplementedException(); + } + + public void __new__ (Metric cls, Args args, KwArgs kwargs) => throw new NotImplementedException(); + + public Tensor __call__(Metric cls, Args args, KwArgs kwargs) => throw new NotImplementedException(); + + public virtual Hashtable get_config() => throw new NotImplementedException(); + + public virtual void reset_states() => throw new NotImplementedException(); + + public abstract void update_state(Args args, KwArgs kwargs); + + public abstract Tensor result(); + + public void add_weight(string name, TensorShape shape= null, VariableAggregation aggregation= VariableAggregation.Sum, + VariableSynchronization synchronization = VariableSynchronization.OnRead, Initializers.Initializer initializer= null, + string dtype= null) => throw new NotImplementedException(); + + public static Tensor accuracy(Tensor y_true, Tensor y_pred) => throw new 
NotImplementedException(); + + public static Tensor binary_accuracy(Tensor y_true, Tensor y_pred, float threshold = 0.5f) => throw new NotImplementedException(); + + public static Tensor categorical_accuracy(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor sparse_categorical_accuracy(Tensor y_true, Tensor y_pred) => throw new NotImplementedException(); + + public static Tensor top_k_categorical_accuracy(Tensor y_true, Tensor y_pred, int k = 5) => throw new NotImplementedException(); + + public static Tensor sparse_top_k_categorical_accuracy(Tensor y_true, Tensor y_pred, int k = 5) => throw new NotImplementedException(); + + public static Tensor cosine_proximity(Tensor y_true, Tensor y_pred, int axis = -1) => throw new NotImplementedException(); + + public static Metric clone_metric(Metric metric) => throw new NotImplementedException(); + + public static Metric[] clone_metrics(Metric[] metric) => throw new NotImplementedException(); + + public static string serialize(Metric metric) => throw new NotImplementedException(); + + public static Metric deserialize(string config, object custom_objects = null) => throw new NotImplementedException(); + + public static Metric get(object identifier) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Poisson.cs b/src/TensorFlowNET.Keras/Metrics/Poisson.cs new file mode 100644 index 00000000..7cdf5bd9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Poisson.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Poisson : MeanMetricWrapper + { + public Poisson(string name = "logcosh", string dtype = null) + : base(Losses.Loss.logcosh, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Precision.cs b/src/TensorFlowNET.Keras/Metrics/Precision.cs new file mode 100644 index 00000000..3d5c7248 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Precision.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Precision : Metric + { + public Precision(float? thresholds = null, int? top_k = null, int? class_id = null, string name = null, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public Precision(float[] thresholds = null, int? top_k = null, int? 
class_id = null, string name = null, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/PrecisionAtRecall.cs b/src/TensorFlowNET.Keras/Metrics/PrecisionAtRecall.cs new file mode 100644 index 00000000..05558232 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/PrecisionAtRecall.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class PrecisionAtRecall : SensitivitySpecificityBase + { + public PrecisionAtRecall(float recall, int num_thresholds = 200, string name = null, string dtype = null) : base(recall, num_thresholds, name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Recall.cs b/src/TensorFlowNET.Keras/Metrics/Recall.cs new file mode 100644 index 00000000..804d4461 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Recall.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Recall : Metric + { + public Recall(float? thresholds = null, int? top_k = null, int? class_id = null, string name = null, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public Recall(float[] thresholds = null, int? top_k = null, int? 
class_id = null, string name = null, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Reduce.cs b/src/TensorFlowNET.Keras/Metrics/Reduce.cs new file mode 100644 index 00000000..143f441e --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Reduce.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class Reduce : Metric + { + public Reduce(string reduction, string name, string dtype= null) + : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/RootMeanSquaredError.cs b/src/TensorFlowNET.Keras/Metrics/RootMeanSquaredError.cs new file mode 100644 index 00000000..cd7a6968 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/RootMeanSquaredError.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class RootMeanSquaredError : Mean + { + public RootMeanSquaredError(string name = "root_mean_squared_error", string dtype = null) + : base(name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SensitivityAtSpecificity.cs b/src/TensorFlowNET.Keras/Metrics/SensitivityAtSpecificity.cs new file mode 100644 index 00000000..72793d79 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SensitivityAtSpecificity.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SensitivityAtSpecificity : SensitivitySpecificityBase + { + public SensitivityAtSpecificity(float specificity, int num_thresholds = 200, string name = null, string dtype = null) : base(specificity, num_thresholds, name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SensitivitySpecificityBase.cs b/src/TensorFlowNET.Keras/Metrics/SensitivitySpecificityBase.cs new file mode 100644 index 00000000..7531cdbb --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SensitivitySpecificityBase.cs @@ -0,0 +1,29 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SensitivitySpecificityBase : Metric + { + public SensitivitySpecificityBase(float value, int num_thresholds= 200, string name = null, string dtype = null) : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override void reset_states() + { + throw new 
NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SparseCategoricalAccuracy.cs b/src/TensorFlowNET.Keras/Metrics/SparseCategoricalAccuracy.cs new file mode 100644 index 00000000..5a57907d --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SparseCategoricalAccuracy.cs @@ -0,0 +1,15 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SparseCategoricalAccuracy : MeanMetricWrapper + { + public SparseCategoricalAccuracy(string name = "sparse_categorical_accuracy", string dtype = null) + : base(Metric.sparse_categorical_accuracy, name, dtype) + { + } + + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SparseCategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Metrics/SparseCategoricalCrossentropy.cs new file mode 100644 index 00000000..b2513fd8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SparseCategoricalCrossentropy.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SparseCategoricalCrossentropy : MeanMetricWrapper + { + public SparseCategoricalCrossentropy(string name = "sparse_categorical_crossentropy", string dtype = null, bool from_logits = false, int axis = -1) + : base(Fn, name, dtype) + { + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Losses.Loss.sparse_categorical_crossentropy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SparseTopKCategoricalAccuracy.cs b/src/TensorFlowNET.Keras/Metrics/SparseTopKCategoricalAccuracy.cs new file mode 100644 index 00000000..b02049ad --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SparseTopKCategoricalAccuracy.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SparseTopKCategoricalAccuracy : MeanMetricWrapper + { + public SparseTopKCategoricalAccuracy(int k = 5, string name = "sparse_top_k_categorical_accuracy", string dtype = null) + : base(Fn, name, dtype) + { + + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Metric.sparse_top_k_categorical_accuracy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SpecificityAtSensitivity.cs b/src/TensorFlowNET.Keras/Metrics/SpecificityAtSensitivity.cs new file mode 100644 index 00000000..8742e548 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SpecificityAtSensitivity.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + class SpecificityAtSensitivity + { + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SquaredHinge.cs b/src/TensorFlowNET.Keras/Metrics/SquaredHinge.cs new file mode 100644 index 00000000..04a7bef8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SquaredHinge.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SquaredHinge : MeanMetricWrapper + { + public SquaredHinge(string name = "squared_hinge", string dtype = null) + : base(Losses.Loss.squared_hinge, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/Sum.cs b/src/TensorFlowNET.Keras/Metrics/Sum.cs new file mode 100644 index 00000000..f466a136 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/Sum.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + 
public class Sum : Reduce + { + public Sum(string name, string dtype = null) + : base(Reduction.SUM, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SumOverBatchSize.cs b/src/TensorFlowNET.Keras/Metrics/SumOverBatchSize.cs new file mode 100644 index 00000000..d25654c5 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SumOverBatchSize.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SumOverBatchSize : Reduce + { + public SumOverBatchSize(string name = "sum_over_batch_size", string dtype = null) : base(Reduction.SUM_OVER_BATCH_SIZE, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/SumOverBatchSizeMetricWrapper.cs b/src/TensorFlowNET.Keras/Metrics/SumOverBatchSizeMetricWrapper.cs new file mode 100644 index 00000000..ff1c0497 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/SumOverBatchSizeMetricWrapper.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class SumOverBatchSizeMetricWrapper : SumOverBatchSize + { + public SumOverBatchSizeMetricWrapper(Func fn, string name, string dtype = null) + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/TopKCategoricalAccuracy.cs b/src/TensorFlowNET.Keras/Metrics/TopKCategoricalAccuracy.cs new file mode 100644 index 00000000..e2c80fad --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/TopKCategoricalAccuracy.cs @@ -0,0 +1,19 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class TopKCategoricalAccuracy : MeanMetricWrapper + { + public TopKCategoricalAccuracy(int k = 5, string name = "top_k_categorical_accuracy", string dtype = null) + : base(Fn, name, dtype) + { + } + + internal static Tensor Fn(Tensor y_true, Tensor y_pred) + { + return Metric.top_k_categorical_accuracy(y_true, y_pred); + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/TrueNegatives.cs b/src/TensorFlowNET.Keras/Metrics/TrueNegatives.cs new file mode 100644 index 00000000..7e81a2fd --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/TrueNegatives.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class TrueNegatives : _ConfusionMatrixConditionCount + { + public TrueNegatives(float thresholds = 0.5F, string name = null, string dtype = null) + : base(Utils.MetricsUtils.ConfusionMatrix.TRUE_NEGATIVES, thresholds, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/TruePositives.cs b/src/TensorFlowNET.Keras/Metrics/TruePositives.cs new file mode 100644 index 00000000..867049be --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/TruePositives.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Metrics +{ + public class TruePositives : _ConfusionMatrixConditionCount + { + public TruePositives(float thresholds = 0.5F, string name = null, string dtype = null) + : base(Utils.MetricsUtils.ConfusionMatrix.TRUE_POSITIVES, thresholds, name, dtype) + { + } + } +} diff --git a/src/TensorFlowNET.Keras/Metrics/_ConfusionMatrixConditionCount.cs 
b/src/TensorFlowNET.Keras/Metrics/_ConfusionMatrixConditionCount.cs new file mode 100644 index 00000000..3d2be961 --- /dev/null +++ b/src/TensorFlowNET.Keras/Metrics/_ConfusionMatrixConditionCount.cs @@ -0,0 +1,37 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; +using static Tensorflow.Keras.Utils.MetricsUtils; + +namespace Tensorflow.Keras.Metrics +{ + public class _ConfusionMatrixConditionCount : Metric + { + public _ConfusionMatrixConditionCount(string confusion_matrix_cond, float thresholds= 0.5f, string name= null, string dtype= null) + : base(name, dtype) + { + throw new NotImplementedException(); + } + + public override Tensor result() + { + throw new NotImplementedException(); + } + + public override void update_state(Args args, KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public override void reset_states() + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Model.cs b/src/TensorFlowNET.Keras/Model.cs index 034c7f64..0eb7fbce 100644 --- a/src/TensorFlowNET.Keras/Model.cs +++ b/src/TensorFlowNET.Keras/Model.cs @@ -21,25 +21,25 @@ using System.Collections.Generic; using Tensorflow; using static Tensorflow.Binding; -namespace Keras +namespace Tensorflow.Keras { public class Model { public Tensor Flow; - List layer_stack; + List layer_stack; public TensorShape InputShape; public Model() { - layer_stack = new List(); + layer_stack = new List(); } - public Model Add(ILayer layer) + public Model Add(Layer layer) { layer_stack.Add(layer); return this; } - public Model Add(IEnumerable layers) + public Model Add(IEnumerable layers) { layer_stack.AddRange(layers); return this; @@ -83,9 +83,9 @@ namespace Keras Flow = features; for (int i = 0; i < layer_stack.Count; i++) { - layer_stack[i].__build__(flow_shape); - flow_shape = layer_stack[i].output_shape(flow_shape); - Flow = layer_stack[i].__call__(Flow); + //layer_stack[i].build(flow_shape); + //flow_shape = layer_stack[i].output_shape(flow_shape); + //Flow = layer_stack[i].__call__(Flow); } var predictions = tf.sigmoid(tf.squeeze(Flow)); diff --git a/src/TensorFlowNET.Keras/Models.cs b/src/TensorFlowNET.Keras/Models.cs new file mode 100644 index 00000000..0ee59976 --- /dev/null +++ b/src/TensorFlowNET.Keras/Models.cs @@ -0,0 +1,42 @@ +using Keras.Layers; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; +using Tensorflow.Keras.Engine; + +namespace Tensorflow.Keras +{ + class Models + { + public class Model : Keras.Engine.Training.Model{} + + public static Layer share_weights(Layer layer) => throw new NotImplementedException(); + + private static Layer _clone_layer(Layer layer) => throw new NotImplementedException(); + + private static Layer _insert_ancillary_layers(Model model, Layer ancillary_layers, string[] metrics_names, Node[] new_nodes) => throw new NotImplementedException(); + + private static Node[] _make_new_nodes(Node[] nodes_by_depth, Func layer_fn, Hashtable layer_map, Hashtable tensor_map) => throw new NotImplementedException(); + + private static Model _clone_functional_model(Model model, Tensor[] input_tensors = null, Func layer_fn = null) => throw new NotImplementedException(); + + private static (Hashtable, Layer[]) _clone_layers_and_model_config(Model model, Layer[] input_layers, Func layer_fn) => throw new NotImplementedException(); + + private static (Layer[], Layer[]) 
_remove_ancillary_layers(Model model, Hashtable layer_map, Layer[] layers) => throw new NotImplementedException(); + + private static Sequential _clone_sequential_model(Model model, Tensor[] input_tensors = null, Func layer_fn = null) => throw new NotImplementedException(); + + public static Model clone_model(Model model, Tensor[] input_tensors = null, Func layer_fn = null) => throw new NotImplementedException(); + + private static void _in_place_subclassed_model_reset(Model model) => throw new NotImplementedException(); + + private static void _reset_build_compile_trackers(Model model) => throw new NotImplementedException(); + + public static void in_place_subclassed_model_state_restoration(Model model) => throw new NotImplementedException(); + + public static void clone_and_build_model(Model model, Tensor[] input_tensors= null, Tensor[] target_tensors= null, object custom_objects= null, + bool compile_clone= true, bool in_place_reset= false, VariableV1 optimizer_iterations= null, Hashtable optimizer_config= null) + => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Adadelta.cs b/src/TensorFlowNET.Keras/Optimizer/Adadelta.cs new file mode 100644 index 00000000..e5d72976 --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Adadelta.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Adadelta : Optimizer + { + public Adadelta(float lr= 0.01f, float rho = 0.95f, float? epsilon = null, float decay = 0) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Adagrad.cs b/src/TensorFlowNET.Keras/Optimizer/Adagrad.cs new file mode 100644 index 00000000..4353d79b --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Adagrad.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Adagrad : Optimizer + { + public Adagrad(float lr= 0.01f, float? epsilon = null, float decay = 0) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Adam.cs b/src/TensorFlowNET.Keras/Optimizer/Adam.cs new file mode 100644 index 00000000..15053284 --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Adam.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Adam : Optimizer + { + public Adam(float lr= 0.001f, float beta_1 = 0.9f, float beta_2 = 0.99f, float? 
epsilon = null, float decay = 0) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Adamax.cs b/src/TensorFlowNET.Keras/Optimizer/Adamax.cs new file mode 100644 index 00000000..9581c6dc --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Adamax.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Adamax : Optimizer + { + public Adamax(float lr = 0.002f, float beta_1 = 0.9f, float beta_2 = 0.999f, float? epsilon = null, float decay = 0) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Nadam.cs b/src/TensorFlowNET.Keras/Optimizer/Nadam.cs new file mode 100644 index 00000000..b933570f --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Nadam.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Nadam : Optimizer + { + public Nadam(float lr = 0.002f, float beta_1 = 0.9f, float beta_2 = 0.999f, float? epsilon = null, float schedule_decay = 0.004f) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/Optimizer.cs b/src/TensorFlowNET.Keras/Optimizer/Optimizer.cs new file mode 100644 index 00000000..ec8bd68a --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/Optimizer.cs @@ -0,0 +1,36 @@ +using NumSharp; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class Optimizer + { + public Optimizer(KwArgs kwargs) + { + throw new NotImplementedException(); + } + + public virtual Tensor[] get_updates(Tensor loss, variables @params) + { + return null; + } + + public virtual Tensor[] get_gradients(Tensor loss, variables @params) => throw new NotImplementedException(); + + public virtual void set_weights(NDArray[] weights) => throw new NotImplementedException(); + + public virtual NDArray[] get_weights() => throw new NotImplementedException(); + + public virtual Hashtable get_config() => throw new NotImplementedException(); + + public static string serialize(Optimizer optimizer) => throw new NotImplementedException(); + + public static Optimizer deserialize(string config, object custom_objects = null) => throw new NotImplementedException(); + + public static Optimizer get(object identifier) => throw new NotImplementedException(); + + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/RMSprop.cs b/src/TensorFlowNET.Keras/Optimizer/RMSprop.cs new file mode 100644 index 00000000..79894831 --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/RMSprop.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras 
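Each of the optimizer stubs in this directory (SGD, Adam, RMSprop, Adadelta, ...) is expected to fill in `get_updates` with the corresponding update rule from Keras. As a point of reference, this is the plain SGD rule (time-based learning-rate decay plus optional Nesterov momentum) written on arrays; it is a sketch of the math only and does not touch the `Tensor`/`variables` graph API that `Optimizer.get_updates` actually works with:

```csharp
using System;

// Sketch of the update rule a Keras-style SGD is expected to encode in get_updates,
// shown on plain arrays instead of graph ops. Illustrative math, not the TF.NET API.
class SgdSketch
{
    readonly float _lr, _momentum, _decay;
    readonly bool _nesterov;
    long _iterations;

    public SgdSketch(float lr = 0.01f, float momentum = 0f, float decay = 0f, bool nesterov = false)
    {
        _lr = lr; _momentum = momentum; _decay = decay; _nesterov = nesterov;
    }

    // weights, grads and velocity have the same length; velocity starts at zero.
    public void ApplyUpdates(float[] weights, float[] grads, float[] velocity)
    {
        _iterations++;
        var lr = _lr * (1f / (1f + _decay * _iterations)); // time-based decay
        for (int i = 0; i < weights.Length; i++)
        {
            velocity[i] = _momentum * velocity[i] - lr * grads[i];
            weights[i] += _nesterov
                ? _momentum * velocity[i] - lr * grads[i]  // Nesterov look-ahead step
                : velocity[i];
        }
    }
}
```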
+{ + public class RMSprop : Optimizer + { + public RMSprop(float lr= 0.01f, float rho = 0f, float? epsilon = null, float decay = 0) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Optimizer/SGD.cs b/src/TensorFlowNET.Keras/Optimizer/SGD.cs new file mode 100644 index 00000000..17063c54 --- /dev/null +++ b/src/TensorFlowNET.Keras/Optimizer/SGD.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras +{ + public class SGD : Optimizer + { + public SGD(float lr= 0.01f, float momentum= 0, float decay= 0, bool nesterov= false) : base(null) + { + throw new NotImplementedException(); + } + + public override Tensor[] get_updates(Tensor loss, variables @params) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/Adadelta.cs b/src/TensorFlowNET.Keras/OptimizersV2/Adadelta.cs new file mode 100644 index 00000000..1ba244da --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Adadelta.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Adadelta + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/Adagrad.cs b/src/TensorFlowNET.Keras/OptimizersV2/Adagrad.cs new file mode 100644 index 00000000..9781c898 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Adagrad.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Adagrad + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/Adam.cs b/src/TensorFlowNET.Keras/OptimizersV2/Adam.cs new file mode 100644 index 00000000..7e08d517 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Adam.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Adam + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/Adamax.cs b/src/TensorFlowNET.Keras/OptimizersV2/Adamax.cs new file mode 100644 index 00000000..73f37ad9 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Adamax.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Adamax + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/Ftrl.cs b/src/TensorFlowNET.Keras/OptimizersV2/Ftrl.cs new file mode 100644 index 00000000..758698a8 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Ftrl.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Ftrl + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/LearningRateSchedule.cs b/src/TensorFlowNET.Keras/OptimizersV2/LearningRateSchedule.cs new file mode 100644 index 00000000..2dd3df40 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/LearningRateSchedule.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class LearningRateSchedule + { + } +} diff --git 
a/src/TensorFlowNET.Keras/OptimizersV2/Nadam.cs b/src/TensorFlowNET.Keras/OptimizersV2/Nadam.cs new file mode 100644 index 00000000..ec247c41 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/Nadam.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class Nadam + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/OptimizerV2.cs b/src/TensorFlowNET.Keras/OptimizersV2/OptimizerV2.cs new file mode 100644 index 00000000..ecb9780a --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/OptimizerV2.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class OptimizerV2 + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/RMSProp.cs b/src/TensorFlowNET.Keras/OptimizersV2/RMSProp.cs new file mode 100644 index 00000000..62d9f57b --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/RMSProp.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class RMSProp + { + } +} diff --git a/src/TensorFlowNET.Keras/OptimizersV2/SGD.cs b/src/TensorFlowNET.Keras/OptimizersV2/SGD.cs new file mode 100644 index 00000000..8e72c486 --- /dev/null +++ b/src/TensorFlowNET.Keras/OptimizersV2/SGD.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.OptimizersV2 +{ + class SGD + { + } +} diff --git a/src/TensorFlowNET.Keras/Premade/LinearModel.cs b/src/TensorFlowNET.Keras/Premade/LinearModel.cs new file mode 100644 index 00000000..7b3d1276 --- /dev/null +++ b/src/TensorFlowNET.Keras/Premade/LinearModel.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Premade +{ + class LinearModel + { + } +} diff --git a/src/TensorFlowNET.Keras/Premade/WideDeepModel.cs b/src/TensorFlowNET.Keras/Premade/WideDeepModel.cs new file mode 100644 index 00000000..108c689b --- /dev/null +++ b/src/TensorFlowNET.Keras/Premade/WideDeepModel.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Premade +{ + class WideDeepModel + { + } +} diff --git a/src/TensorFlowNET.Keras/Preprocessing/Image.cs b/src/TensorFlowNET.Keras/Preprocessing/Image.cs new file mode 100644 index 00000000..ad9c9b12 --- /dev/null +++ b/src/TensorFlowNET.Keras/Preprocessing/Image.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Preprocessing +{ + class Image + { + } +} diff --git a/src/TensorFlowNET.Keras/Preprocessing/Sequence.cs b/src/TensorFlowNET.Keras/Preprocessing/Sequence.cs new file mode 100644 index 00000000..3773001f --- /dev/null +++ b/src/TensorFlowNET.Keras/Preprocessing/Sequence.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Preprocessing +{ + class Sequence + { + } +} diff --git a/src/TensorFlowNET.Keras/Preprocessing/Text.cs b/src/TensorFlowNET.Keras/Preprocessing/Text.cs new file mode 100644 index 00000000..7f6012c7 --- /dev/null +++ b/src/TensorFlowNET.Keras/Preprocessing/Text.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Preprocessing +{ + class Text + { + } +} diff --git a/src/TensorFlowNET.Keras/Regularizers/L1L2.cs b/src/TensorFlowNET.Keras/Regularizers/L1L2.cs new 
file mode 100644 index 00000000..927b3319 --- /dev/null +++ b/src/TensorFlowNET.Keras/Regularizers/L1L2.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Regularizers +{ + public class L1L2 : Regularizer + { + public L1L2(float l1 = 0f, float l2 = 0f) + { + throw new NotImplementedException(); + } + + public override float call(Tensor x) + { + throw new NotImplementedException(); + } + + public override Hashtable get_config() + { + throw new NotImplementedException(); + } + } +} diff --git a/src/TensorFlowNET.Keras/Regularizers/Regularizer.cs b/src/TensorFlowNET.Keras/Regularizers/Regularizer.cs new file mode 100644 index 00000000..047b035f --- /dev/null +++ b/src/TensorFlowNET.Keras/Regularizers/Regularizer.cs @@ -0,0 +1,40 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Regularizers +{ + public abstract class Regularizer + { + public virtual float call(Tensor x) + { + return 0f; + } + + public static Regularizer from_config(Hashtable hashtable) => throw new NotImplementedException(); + + public virtual Hashtable get_config() => throw new NotImplementedException(); + + public static Regularizer l1(float l = 0.01f) + { + return new L1L2(l1: l); + } + + public static Regularizer l2(float l = 0.01f) + { + return new L1L2(l2: l); + } + + public static Regularizer l1_l2(float l1 = 0.01f, float l2 = 0.01f) + { + return new L1L2(l1, l2); + } + + public static string serialize(Regularizer regularizer) => throw new NotImplementedException(); + + public static string deserialize(string config, dynamic custom_objects = null) => throw new NotImplementedException(); + + public static Regularizer get(object identifier) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Saving/HDF5Format.cs b/src/TensorFlowNET.Keras/Saving/HDF5Format.cs new file mode 100644 index 00000000..52ed591c --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/HDF5Format.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving +{ + class HDF5Format + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/ModelConfig.cs b/src/TensorFlowNET.Keras/Saving/ModelConfig.cs new file mode 100644 index 00000000..934e9429 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/ModelConfig.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving +{ + class ModelConfig + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/Save.cs b/src/TensorFlowNET.Keras/Saving/Save.cs new file mode 100644 index 00000000..f4469902 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/Save.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving +{ + class Save + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/BaseSerialization.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/BaseSerialization.cs new file mode 100644 index 00000000..90102a06 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/BaseSerialization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class BaseSerialization + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/Constants.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/Constants.cs new file mode 100644 
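The `Regularizer` factory methods above (`l1`, `l2`, `l1_l2`) already construct `L1L2`, but `L1L2.call` itself is still a stub. The penalty it is presumably meant to return is the standard elastic-net term `l1 * Σ|w| + l2 * Σw²`; a minimal sketch on a plain array, independent of the `Tensor` API:

```csharp
using System;
using System.Linq;

// Sketch of the penalty L1L2.call is expected to compute once implemented:
// penalty = l1 * sum(|w|) + l2 * sum(w^2), shown on a float[] instead of a Tensor.
static class L1L2Sketch
{
    public static float Penalty(float[] weights, float l1 = 0f, float l2 = 0f)
        => l1 * weights.Sum(w => Math.Abs(w))
         + l2 * weights.Sum(w => w * w);

    static void Main()
    {
        var w = new[] { 0.5f, -1.0f, 2.0f };
        // Mirrors what Regularizer.l1_l2(l1: 0.01f, l2: 0.001f) would charge for these weights.
        Console.WriteLine(Penalty(w, l1: 0.01f, l2: 0.001f)); // 0.01*3.5 + 0.001*5.25 = 0.04025
    }
}
```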
index 00000000..85daf45d --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/Constants.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class Constants + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/LayerSerialization.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/LayerSerialization.cs new file mode 100644 index 00000000..bbf067fb --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/LayerSerialization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class LayerSerialization + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/Load.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/Load.cs new file mode 100644 index 00000000..2508f7f6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/Load.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class Load + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/ModelSerialization.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/ModelSerialization.cs new file mode 100644 index 00000000..4a3e1336 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/ModelSerialization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class ModelSerialization + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/NetworkSerialization.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/NetworkSerialization.cs new file mode 100644 index 00000000..6eb17318 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/NetworkSerialization.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class NetworkSerialization + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/Save.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/Save.cs new file mode 100644 index 00000000..45933877 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/Save.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class Save + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/SaveImpl.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/SaveImpl.cs new file mode 100644 index 00000000..67a5f0dc --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/SaveImpl.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class SaveImpl + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/SerializedAttributes.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/SerializedAttributes.cs new file mode 100644 index 00000000..d1b19ccf --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/SerializedAttributes.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class SerializedAttributes + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModel/Utils.cs b/src/TensorFlowNET.Keras/Saving/SavedModel/Utils.cs new file mode 100644 index 00000000..8beebdea --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModel/Utils.cs @@ -0,0 +1,10 @@ +using System; +using 
System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving.SavedModel +{ + class Utils + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavedModelExperimental.cs b/src/TensorFlowNET.Keras/Saving/SavedModelExperimental.cs new file mode 100644 index 00000000..0455b622 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavedModelExperimental.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving +{ + class SavedModelExperimental + { + } +} diff --git a/src/TensorFlowNET.Keras/Saving/SavingUtils.cs b/src/TensorFlowNET.Keras/Saving/SavingUtils.cs new file mode 100644 index 00000000..b5f03de8 --- /dev/null +++ b/src/TensorFlowNET.Keras/Saving/SavingUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Saving +{ + class SavingUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj new file mode 100644 index 00000000..76cf4a3e --- /dev/null +++ b/src/TensorFlowNET.Keras/Tensorflow.Keras.csproj @@ -0,0 +1,17 @@ + + + + netstandard2.0 + Tensorflow.Keras + Tensorflow.Keras + + + + + + + + + + + diff --git a/src/TensorFlowNET.Keras/Utils/ConvUtils.cs b/src/TensorFlowNET.Keras/Utils/ConvUtils.cs new file mode 100644 index 00000000..604db158 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/ConvUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class ConvUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/DataUtils.cs b/src/TensorFlowNET.Keras/Utils/DataUtils.cs new file mode 100644 index 00000000..2f5e3646 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/DataUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class DataUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/GenericUtils.cs b/src/TensorFlowNET.Keras/Utils/GenericUtils.cs new file mode 100644 index 00000000..edc8f7fe --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/GenericUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class GenericUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/IOUtils.cs b/src/TensorFlowNET.Keras/Utils/IOUtils.cs new file mode 100644 index 00000000..0cc9c930 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/IOUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class IOUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/KernelizedUtils.cs b/src/TensorFlowNET.Keras/Utils/KernelizedUtils.cs new file mode 100644 index 00000000..30c950c6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/KernelizedUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class KernelizedUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/LayerUtils.cs b/src/TensorFlowNET.Keras/Utils/LayerUtils.cs new file mode 100644 index 00000000..70ffa9a4 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/LayerUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class LayerUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/LossesUtils.cs 
b/src/TensorFlowNET.Keras/Utils/LossesUtils.cs new file mode 100644 index 00000000..8fd35ca6 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/LossesUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class LossesUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/MetricsUtils.cs b/src/TensorFlowNET.Keras/Utils/MetricsUtils.cs new file mode 100644 index 00000000..1e51b099 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/MetricsUtils.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using System.Reflection; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + public class MetricsUtils + { + public static class Reduction + { + public const string SUM = "sum"; + public const string SUM_OVER_BATCH_SIZE = "sum_over_batch_size"; + public const string WEIGHTED_MEAN = "weighted_mean"; + } + + public static class ConfusionMatrix + { + public const string TRUE_POSITIVES = "tp"; + public const string FALSE_POSITIVES = "fp"; + public const string TRUE_NEGATIVES = "tn"; + public const string FALSE_NEGATIVES = "fn"; + } + + public static class AUCCurve + { + public const string ROC = "ROC"; + public const string PR = "PR"; + + public static string from_str(string key) => throw new NotImplementedException(); + } + + public static class AUCSummationMethod + { + public const string INTERPOLATION = "interpolation"; + public const string MAJORING = "majoring"; + public const string MINORING = "minoring"; + + public static string from_str(string key) => throw new NotImplementedException(); + } + + public static dynamic update_state_wrapper(Func> update_state_fn) => throw new NotImplementedException(); + + public static dynamic result_wrapper(Func result_fn) => throw new NotImplementedException(); + + public static WeakReference weakmethod(MethodInfo method) => throw new NotImplementedException(); + + public static void assert_thresholds_range(float[] thresholds) => throw new NotImplementedException(); + + public static void parse_init_thresholds(float[] thresholds, float default_threshold = 0.5f) => throw new NotImplementedException(); + + public static Operation update_confusion_matrix_variables(variables variables_to_update, Tensor y_true, Tensor y_pred, float[] thresholds, + int? top_k= null,int? 
class_id= null, Tensor sample_weight= null, bool multi_label= false, + Tensor label_weights= null) => throw new NotImplementedException(); + + private static Tensor _filter_top_k(Tensor x, int k) => throw new NotImplementedException(); + + private static (Tensor[], Tensor) ragged_assert_compatible_and_get_flat_values(Tensor[] values, Tensor mask = null) => throw new NotImplementedException(); + } +} diff --git a/src/TensorFlowNET.Keras/Utils/ModeKeys.cs b/src/TensorFlowNET.Keras/Utils/ModeKeys.cs new file mode 100644 index 00000000..03ba5e44 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/ModeKeys.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class ModeKeys + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/MultiGpuUtils.cs b/src/TensorFlowNET.Keras/Utils/MultiGpuUtils.cs new file mode 100644 index 00000000..347438a2 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/MultiGpuUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class MultiGpuUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/NPUtils.cs b/src/TensorFlowNET.Keras/Utils/NPUtils.cs new file mode 100644 index 00000000..e8bbe68e --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/NPUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class NPUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/TFUtils.cs b/src/TensorFlowNET.Keras/Utils/TFUtils.cs new file mode 100644 index 00000000..8be02c8d --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/TFUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class TFUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/VersionUtils.cs b/src/TensorFlowNET.Keras/Utils/VersionUtils.cs new file mode 100644 index 00000000..a18d70d9 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/VersionUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class VersionUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Utils/VisUtils.cs b/src/TensorFlowNET.Keras/Utils/VisUtils.cs new file mode 100644 index 00000000..79ac0132 --- /dev/null +++ b/src/TensorFlowNET.Keras/Utils/VisUtils.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Utils +{ + class VisUtils + { + } +} diff --git a/src/TensorFlowNET.Keras/Wrappers/ScikitLearn.cs b/src/TensorFlowNET.Keras/Wrappers/ScikitLearn.cs new file mode 100644 index 00000000..0704509a --- /dev/null +++ b/src/TensorFlowNET.Keras/Wrappers/ScikitLearn.cs @@ -0,0 +1,10 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow.Keras.Wrappers +{ + class ScikitLearn + { + } +} diff --git a/src/TensorFlowNet.Benchmarks/Benchmark.csproj b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj similarity index 62% rename from src/TensorFlowNet.Benchmarks/Benchmark.csproj rename to src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj index 0dcc771f..9f28cffa 100644 --- a/src/TensorFlowNet.Benchmarks/Benchmark.csproj +++ b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj @@ -9,13 +9,18 @@ true + + true + + - + + diff --git a/tensorflowlib/README.md b/tensorflowlib/README.md index 318e5dc9..1450dc44 100644 --- 
a/tensorflowlib/README.md +++ b/tensorflowlib/README.md @@ -81,5 +81,5 @@ TF_CAPI_EXPORT extern void UpdateEdge(TF_Graph* graph, TF_Output new_src, TF_Inp TF_CAPI_EXPORT extern void RemoveAllControlInputs(TF_Graph* graph, TF_Operation* op); ``` - - +For Linux version, these APIs symbols should also be put into `tensorflow/c/version_script.lds` to be exported. +Please refer to commit `https://github.com/SciSharp/tensorflow/commit/58122da06be3e7707500ad889dfd5c760a3e0424` diff --git a/test/TensorFlowNET.UnitTest/EnforcedSinglethreadingTests.cs b/test/TensorFlowNET.UnitTest/EnforcedSinglethreadingTests.cs new file mode 100644 index 00000000..b7efc116 --- /dev/null +++ b/test/TensorFlowNET.UnitTest/EnforcedSinglethreadingTests.cs @@ -0,0 +1,107 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices; +using System.Threading; +using FluentAssertions; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using NumSharp; +using Tensorflow; +using Tensorflow.Util; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest +{ + [TestClass] + public class EnforcedSinglethreadingTests : CApiTest + { + private static readonly object _singlethreadLocker = new object(); + + /// Initializes a new instance of the class. + public EnforcedSinglethreadingTests() + { + ops.IsSingleThreaded = true; + } + + [TestMethod, Ignore("Has to be tested manually.")] + public void SessionCreation() + { + lock (_singlethreadLocker) + { + ops.IsSingleThreaded.Should().BeTrue(); + + ops.uid(); //increment id by one + + //the core method + tf.peak_default_graph().Should().BeNull(); + + using (var sess = tf.Session()) + { + var default_graph = tf.peak_default_graph(); + var sess_graph = sess.GetPrivate("_graph"); + sess_graph.Should().NotBeNull(); + default_graph.Should().NotBeNull() + .And.BeEquivalentTo(sess_graph); + + var (graph, session) = Parallely(() => (tf.get_default_graph(), tf.get_default_session())); + + graph.Should().BeEquivalentTo(default_graph); + session.Should().BeEquivalentTo(sess); + } + } + } + + T Parallely(Func fnc) + { + var mrh = new ManualResetEventSlim(); + T ret = default; + Exception e = default; + new Thread(() => + { + try + { + ret = fnc(); + } catch (Exception ee) + { + e = ee; + throw; + } finally + { + mrh.Set(); + } + }).Start(); + + if (!Debugger.IsAttached) + mrh.Wait(10000).Should().BeTrue(); + else + mrh.Wait(-1); + e.Should().BeNull(e?.ToString()); + return ret; + } + + void Parallely(Action fnc) + { + var mrh = new ManualResetEventSlim(); + Exception e = default; + new Thread(() => + { + try + { + fnc(); + } catch (Exception ee) + { + e = ee; + throw; + } finally + { + mrh.Set(); + } + }).Start(); + + mrh.Wait(10000).Should().BeTrue(); + e.Should().BeNull(e.ToString()); + } + } +} \ No newline at end of file diff --git a/test/TensorFlowNET.UnitTest/GraphTest.cs b/test/TensorFlowNET.UnitTest/GraphTest.cs index 6a117ac1..80cf6088 100644 --- a/test/TensorFlowNET.UnitTest/GraphTest.cs +++ b/test/TensorFlowNET.UnitTest/GraphTest.cs @@ -139,19 +139,19 @@ namespace TensorFlowNET.UnitTest // Compare with first GraphDef + added NodeDef. graph_def.Node.Add(node_def); - EXPECT_EQ(graph_def.ToString(), graph_def2.ToString()); + EXPECT_EQ(graph_def, graph_def2); // Look up some nodes by name. 
Operation neg2 = c_api.TF_GraphOperationByName(graph, "neg"); EXPECT_EQ(neg, neg2); var node_def2 = neg2.node_def; - EXPECT_EQ(node_def.ToString(), node_def2.ToString()); + EXPECT_EQ(node_def, node_def2); Operation feed2 = c_api.TF_GraphOperationByName(graph, "feed"); EXPECT_EQ(feed, feed2); node_def = feed.node_def; node_def2 = feed2.node_def; - EXPECT_EQ(node_def.ToString(), node_def2.ToString()); + EXPECT_EQ(node_def, node_def2); // Test iterating through the nodes of a graph. found_placeholder = false; diff --git a/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs b/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs new file mode 100644 index 00000000..26dfd3b6 --- /dev/null +++ b/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs @@ -0,0 +1,24 @@ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading.Tasks; +using Tensorflow.Hub; + +namespace UnitTest +{ + [TestClass] + public class MnistModelLoaderTest + { + [TestMethod] + public async Task TestLoad() + { + var loader = new MnistModelLoader(); + var result = await loader.LoadAsync(new ModelLoadSetting + { + TrainDir = "mnist", + OneHot = true, + ValidationSize = 5000, + }); + + Assert.IsNotNull(result); + } + } +} diff --git a/test/TensorFlowNET.UnitTest/Keras/EmbeddingTest.cs b/test/TensorFlowNET.UnitTest/Keras/EmbeddingTest.cs index 0168f22c..d3484d5e 100644 --- a/test/TensorFlowNET.UnitTest/Keras/EmbeddingTest.cs +++ b/test/TensorFlowNET.UnitTest/Keras/EmbeddingTest.cs @@ -14,6 +14,7 @@ namespace TensorFlowNET.UnitTest.Keras [TestClass] public class EmbeddingTest { + [Ignore] [TestMethod] public void Embedding() { diff --git a/test/TensorFlowNET.UnitTest/MultithreadingTests.cs b/test/TensorFlowNET.UnitTest/MultithreadingTests.cs index f4f3f141..adae4fad 100644 --- a/test/TensorFlowNET.UnitTest/MultithreadingTests.cs +++ b/test/TensorFlowNET.UnitTest/MultithreadingTests.cs @@ -283,14 +283,11 @@ namespace TensorFlowNET.UnitTest } } - private static string modelPath = "./model/"; + private static readonly string modelPath = Path.GetFullPath("./Utilities/models/example1/"); [TestMethod] public void TF_GraphOperationByName_FromModel() { - if (!Directory.Exists(modelPath)) - return; - MultiThreadedUnitTestExecuter.Run(8, Core); //the core method diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index d2ae36d7..5ceeb9b5 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -52,6 +52,17 @@ namespace TensorFlowNET.UnitTest assertItemsEqual(given, expected); } + public void assertFloat32Equal(float expected, float actual, string msg) + { + float eps = 1e-6f; + Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); + } + + public void assertFloat64Equal(double expected, double actual, string msg) + { + double eps = 1e-16f; + Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); + } public void assertEqual(object given, object expected) { @@ -70,6 +81,16 @@ namespace TensorFlowNET.UnitTest assertItemsEqual(given as ICollection, expected as ICollection); return; } + if (given is float && expected is float) + { + assertFloat32Equal((float)expected, (float)given, ""); + return; + } + if (given is double && expected is double) + { + assertFloat64Equal((double)expected, (double)given, ""); + return; + } Assert.AreEqual(expected, given); } diff --git 
a/test/TensorFlowNET.UnitTest/SessionTest.cs b/test/TensorFlowNET.UnitTest/SessionTest.cs
index f1453c0e..706ace7a 100644
--- a/test/TensorFlowNET.UnitTest/SessionTest.cs
+++ b/test/TensorFlowNET.UnitTest/SessionTest.cs
@@ -109,7 +109,7 @@ namespace TensorFlowNET.UnitTest
             var c = tf.strings.substr(a, 4, 8);
             using (var sess = tf.Session())
             {
-                var result = (string) c.eval(sess);
+                var result = UTF8Encoding.UTF8.GetString((byte[])c.eval(sess));
                 Console.WriteLine(result);
                 result.Should().Be("heythere");
             }
@@ -126,8 +126,8 @@ namespace TensorFlowNET.UnitTest
             var c = tf.strings.substr(a, 0, size - 5000);
             using (var sess = tf.Session())
             {
-                var result = (string) c.eval(sess);
-                Console.WriteLine((string) result);
+                var result = UTF8Encoding.UTF8.GetString((byte[])c.eval(sess));
+                Console.WriteLine(result);
                 result.Should().HaveLength(size - 5000).And.ContainAll("a");
             }
         }
@@ -137,12 +137,12 @@
         public void Autocast_Case1()
         {
             var sess = tf.Session().as_default();
-            var input = tf.placeholder(tf.float64, shape: new TensorShape(6));
+            var input = tf.placeholder(tf.float32, shape: new TensorShape(6));
             var op = tf.reshape(input, new int[] {2, 3});
             sess.run(tf.global_variables_initializer());
             var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6)));

-            ret.Should().BeOfType<double>().And.BeShaped(2, 3).And.BeOfValues(1, 2, 3, 4, 5, 6);
+            ret.Should().BeOfType<float>().And.BeShaped(2, 3).And.BeOfValues(1, 2, 3, 4, 5, 6);
             print(ret.dtype);
             print(ret);
         }
@@ -165,12 +165,12 @@
         public void Autocast_Case3()
        {
             var sess = tf.Session().as_default();
-            var input = tf.placeholder(tf.int16, shape: new TensorShape(6));
+            var input = tf.placeholder(tf.int64, shape: new TensorShape(6));
             var op = tf.reshape(input, new int[] {2, 3});
             sess.run(tf.global_variables_initializer());
             var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(NPTypeCode.Single) + 0.1f));

-            ret.Should().BeOfType<short>().And.BeShaped(2, 3).And.BeOfValues(1, 2, 3, 4, 5, 6);
+            ret.Should().BeOfType<long>().And.BeShaped(2, 3).And.BeOfValues(1, 2, 3, 4, 5, 6);
             print(ret.dtype);
             print(ret);
         }
diff --git a/test/TensorFlowNET.UnitTest/UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj
similarity index 71%
rename from test/TensorFlowNET.UnitTest/UnitTest.csproj
rename to test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj
index 7e00bc1d..cff48481 100644
--- a/test/TensorFlowNET.UnitTest/UnitTest.csproj
+++ b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj
@@ -28,21 +28,25 @@
-
-
-
-
-
+
+
+
+
+
-
+
+      PreserveNewest
+
+      Always
+
diff --git a/test/TensorFlowNET.UnitTest/Utilities/models/example1/saved_model.pb b/test/TensorFlowNET.UnitTest/Utilities/models/example1/saved_model.pb
new file mode 100644
index 00000000..f37debb5
Binary files /dev/null and b/test/TensorFlowNET.UnitTest/Utilities/models/example1/saved_model.pb differ
diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
index eea640ac..ea7c48f2 100644
--- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
+++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs
@@ -10,7 +10,6 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
     [TestClass]
     public class CondTestCases : PythonTest
     {
-        [Ignore("need tesnroflow expose AddControlInput API")]
         [TestMethod]
         public void testCondTrue_ConstOnly()
         {
@@ -30,7 +29,6 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
             }
         }

-        [Ignore("need tesnroflow expose AddControlInput API")]
         [TestMethod]
         public void testCondFalse_ConstOnly()
         {
@@ -50,7 +48,6 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
             }
         }

-        [Ignore("need tesnroflow expose AddControlInput API")]
         [TestMethod]
         public void testCondTrue()
         {
@@ -67,7 +64,6 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
             assertEquals(result, 34);
         }

-        [Ignore("need tesnroflow expose AddControlInput API")]
         [TestMethod]
         public void testCondFalse()
         {
diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs
index 80ff71db..4ffc5342 100644
--- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs
+++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs
@@ -18,7 +18,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
             var i = constant_op.constant(0, name: "i");
             var c = new Func<Tensor, Tensor>(x => tf.less(x, 10, name: "c"));
             var b = new Func<Tensor, Tensor>(x => tf.add(x, 1, name: "c"));
-            var r = control_flow_ops.while_loop(c, b, i);
+            //var r = control_flow_ops.while_loop(c, b, i);
         }

         private void _testWhileContextHelper(int maximum_iterations)
@@ -29,8 +29,8 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test
                 var i = constant_op.constant(0, name: "i");
                 var c = new Func<Tensor, Tensor>(x => gen_math_ops.less(x, 10, name: "c"));
                 var b = new Func<Tensor, Tensor>(x => gen_math_ops.add(x, 1, name: "c"));
-                control_flow_ops.while_loop(
-                    c, b, i , maximum_iterations: tf.constant(maximum_iterations));
+                //control_flow_ops.while_loop(
+                //    c, b, i , maximum_iterations: tf.constant(maximum_iterations));
                 foreach (Operation op in sess.graph.get_operations())
                 {
                     var control_flow_context = op._get_control_flow_context();
diff --git a/test/TensorFlowNET.UnitTest/functional_ops_test/ScanTestCase.cs b/test/TensorFlowNET.UnitTest/functional_ops_test/ScanTestCase.cs
new file mode 100644
index 00000000..265ff3cf
--- /dev/null
+++ b/test/TensorFlowNET.UnitTest/functional_ops_test/ScanTestCase.cs
@@ -0,0 +1,41 @@
+using System;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using NumSharp;
+using Tensorflow;
+using static Tensorflow.Binding;
+
+namespace TensorFlowNET.UnitTest.functional_ops_test
+{
+    /// <summary>
+    /// https://www.tensorflow.org/api_docs/python/tf/scan
+    /// </summary>
+    [TestClass]
+    public class ScanTestCase
+    {
+        [Ignore("TODO")]
+        [TestMethod]
+        public void ScanForward()
+        {
+            var fn = new Func<Tensor, Tensor, Tensor>((a, x) => tf.add(a, x));
+
+            var sess = tf.Session().as_default();
+
+            var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6));
+            var scan = functional_ops.scan(fn, input);
+            sess.run(scan, (input, np.array(1,2,3,4,5,6))).Should().Be(np.array(1,3,6,10,15,21));
+        }
+
+        [Ignore("TODO")]
+        [TestMethod]
+        public void ScanReverse()
+        {
+            var fn = new Func<Tensor, Tensor, Tensor>((a, x) => tf.add(a, x));
+
+            var sess = tf.Session().as_default();
+
+            var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6));
+            var scan = functional_ops.scan(fn, input, reverse: true);
+            sess.run(scan, (input, np.array(1,2,3,4,5,6))).Should().Be(np.array(21,20,18,15,11,6));
+        }
+    }
+}
diff --git a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs
index 2fae1e5b..c7a26cdd 100644
--- a/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs
+++ b/test/TensorFlowNET.UnitTest/gradients_test/GradientsTest.cs
@@ -94,6 +94,93 @@ namespace TensorFlowNET.UnitTest.gradients_test
             }
         }

+        [TestMethod]
+        public void testSimpleGradients()
+        {
+            (T, T) evaluateDerivatives<T>(Func<Tensor, Tensor> f, T xval) where T : unmanaged
+            {
+                var x = tf.constant(xval);
+                var y = f(x);
+                var g = tf.gradients(y, x);
+
+                using (var session = tf.Session())
+                {
+                    var result = session.run(new[] { y, g[0] });
+                    return (result[0].GetData<T>()[0], result[1].GetData<T>()[0]);
+                }
+            }
+
+            void test(string name, Func<Tensor, Tensor> tfF, Func<double, (double, double)> targetF, double[] values)
+            {
+                foreach (var x in values)
+                {
+                    var (expectedY, expectedDY) = targetF(x);
+
+                    {
+                        var (actualY, actualDY) = evaluateDerivatives(tfF, x);
+                        self.assertFloat64Equal(expectedY, actualY, $"value {name}/float64 at {x}");
+                        self.assertFloat64Equal(expectedDY, actualDY, $"derivative {name}/float64 at {x}");
+                    }
+
+                    {
+                        var (actualY, actualDY) = evaluateDerivatives(tfF, (float)x);
+                        self.assertFloat32Equal((float)expectedY, actualY, $"value {name}/float32 at {x}");
+                        self.assertFloat32Equal((float)expectedDY, actualDY, $"derivative {name}/float32 at {x}");
+                    }
+                }
+            }
+
+            test("tf.exp",
+                x => tf.exp(5 * x),
+                x => (Math.Exp(5.0 * x), 5.0 * Math.Exp(5.0 * x)),
+                new[] { -1.0, 0.0, 1.0, 1.5 });
+
+            test("tf.log",
+                x => tf.log(x),
+                x => (Math.Log(x), 1.0 / x),
+                new[] { 0.5, 1.0, 1.5, 2.0 });
+
+            test("tf.sqrt",
+                x => tf.sqrt(x),
+                x => (Math.Sqrt(x), 0.5 / Math.Sqrt(x)),
+                new[] { 0.5, 1.0, 1.1, 1.5, 2.0 });
+
+            test("tf.sin",
+                x => tf.sin(x),
+                x => (Math.Sin(x), Math.Cos(x)),
+                new[] { -1.0, 0.0, 1.0, 1.5, 2.0 });
+
+            test("tf.sinh",
+                x => tf.sinh(x),
+                x => (Math.Sinh(x), Math.Cosh(x)),
+                new[] { -1.0, 0.0, 1.0, 1.5, 2.0 });
+
+            test("tf.cos",
+                x => tf.cos(x),
+                x => (Math.Cos(x), -Math.Sin(x)),
+                new[] { -1.0, 0.0, 1.0, 1.5, 2.0 });
+
+            test("tf.cosh",
+                x => tf.cosh(x),
+                x => (Math.Cosh(x), Math.Sinh(x)),
+                new[] { -1.0, 0.0, 1.0, 1.5, 2.0 });
+
+            test("tf.tanh",
+                x => tf.tanh(x),
+                x => (Math.Tanh(x), 1.0 - Math.Pow(Math.Tanh(x), 2.0)),
+                new[] { -1.0, 0.0, 1.0, 1.5, 2.0 });
+
+            test("tf.maximum",
+                x => tf.maximum(x, tf.constant(0.0, dtype: x.dtype)),
+                x => (Math.Max(x, 0.0), (x > 0.0) ? 1.0 : 0.0),
+                new[] { -1.0, 1.0 });
+
+            test("tf.minimum",
+                x => tf.minimum(x, tf.constant(0.0, dtype: x.dtype)),
+                x => (Math.Min(x, 0.0), (x < 0.0) ? 1.0 : 0.0),
+                new[] { -1.0, 1.0 });
+        }
+
         [TestMethod]
         public void testTanhGradient()
         {
diff --git a/test/Tensorflow.Keras.UnitTest/OptimizerTest.cs b/test/Tensorflow.Keras.UnitTest/OptimizerTest.cs
new file mode 100644
index 00000000..1aad1868
--- /dev/null
+++ b/test/Tensorflow.Keras.UnitTest/OptimizerTest.cs
@@ -0,0 +1,14 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System.Collections.Generic;
+
+namespace Tensorflow.Keras.UnitTest
+{
+    [TestClass]
+    public class OptimizerTest
+    {
+        [TestMethod]
+        public void BaseConstruct()
+        {
+        }
+    }
+}
diff --git a/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj
new file mode 100644
index 00000000..41dbf2e4
--- /dev/null
+++ b/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj
@@ -0,0 +1,20 @@
+
+
+
+    netcoreapp3.1
+
+    false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
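For readers following the SessionTest.cs change above: `c.eval(sess)` on a string tensor yields the raw UTF-8 bytes rather than a `System.String`, which is why the updated tests decode explicitly. The sketch below is illustrative only and not part of the patch; the constant value and class name are made up, while the `tf.strings.substr`/`eval` calls mirror the ones exercised in the tests.

```cs
using System;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

class StringEvalExample
{
    static void Main()
    {
        // Build a scalar string tensor and take a substring of it,
        // mirroring the StringSubstr tests touched in this patch.
        var a = tf.constant("my tensorflow");
        var c = tf.strings.substr(a, 3, 10);

        using (var sess = tf.Session())
        {
            // eval() on a string tensor returns raw UTF-8 bytes, not a System.String,
            // hence the explicit decode step the updated tests use.
            var bytes = (byte[])c.eval(sess);
            var text = Encoding.UTF8.GetString(bytes);
            Console.WriteLine(text);   // prints "tensorflow"
        }
    }
}
```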
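The new testSimpleGradients method validates `tf.gradients` against closed-form derivatives. Where no closed form is handy, a central-difference check is a common alternative. The sketch below reuses the same `tf.Session`/`run`/`GetData<T>` pattern seen in the test, but the helper name, tolerances, and finite-difference approach are illustrative assumptions, not code from this patch.

```cs
using System;
using Tensorflow;
using static Tensorflow.Binding;

static class GradientCheck
{
    // Compare the graph gradient from tf.gradients against a central finite
    // difference at a single point. f is any scalar-to-scalar graph function,
    // e.g. x => tf.exp(5 * x).
    public static void Check(Func<Tensor, Tensor> f, double x0, double eps = 1e-5)
    {
        double Eval(double v)
        {
            // Note: a sketch only; this builds new nodes in the default graph
            // and opens a fresh session on every call.
            var x = tf.constant(v);
            var y = f(x);
            using (var sess = tf.Session())
                return sess.run(y).GetData<double>()[0];
        }

        // Analytic gradient from the graph.
        var xt = tf.constant(x0);
        var yt = f(xt);
        var g = tf.gradients(yt, xt)[0];
        double analytic;
        using (var sess = tf.Session())
            analytic = sess.run(g).GetData<double>()[0];

        // Numeric gradient via central differences.
        var numeric = (Eval(x0 + eps) - Eval(x0 - eps)) / (2 * eps);

        if (Math.Abs(analytic - numeric) > 1e-4)
            throw new Exception($"gradient mismatch at {x0}: {analytic} vs {numeric}");
    }
}
```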