diff --git a/src/TensorFlowNET.Core/Operations/array_ops.py.cs b/src/TensorFlowNET.Core/Operations/array_ops.py.cs index 680510eb..6f8f2f05 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.py.cs @@ -1,208 +1,243 @@ -using NumSharp.Core; -using System; -using System.Collections.Generic; -using System.Text; - -namespace Tensorflow -{ - public class array_ops - { - public static Tensor placeholder_with_default(T input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name); - - public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) - { - dtype = dtype.as_base_dtype(); - return Python.with(new ops.name_scope(name, "zeros", shape), scope => - { - name = scope; - switch (dtype) - { - case TF_DataType.TF_BOOL: - return _constant_if_small(false, shape, dtype, name); - case TF_DataType.TF_DOUBLE: - return _constant_if_small(0.0D, shape, dtype, name); - case TF_DataType.TF_FLOAT: - return _constant_if_small(0.0F, shape, dtype, name); - case TF_DataType.TF_INT32: - return _constant_if_small(0, shape, dtype, name); - default: - throw new TypeError("can't find type for zeros"); - } - }); - } - - private static Tensor _constant_if_small(T value, Shape shape, TF_DataType dtype, string name) - { - Tensor tShape = null; - if (shape.Size < 1000) - { - return constant_op.constant(value, shape: shape, dtype: dtype, name: name); - } - else - { - tShape = constant_op._tensor_shape_tensor_conversion_function(shape.as_shape()); - var c = constant_op.constant(0); - return gen_array_ops.fill(tShape, c, name: name); - } - } - - public static Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1) => expand_dims_v2(input, axis, name); - - private static Tensor expand_dims_v2(Tensor input, int axis, string name = null) => gen_array_ops.expand_dims(input, axis, name); - - public static Tensor rank(Tensor input, string name = null) - { - return math_ops.rank_internal(input, name, optimize: true); - } - - /// - /// Creates a tensor with all elements set to 1. 
- /// - /// - /// - /// - /// - /// - public static Tensor ones_like(T tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) - => ones_like_impl(tensor, dtype, name, optimize); - - private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) - { - return Python.with(new ops.name_scope(name, "ones_like", new { tensor }), scope => - { - name = scope; - var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); - var ones_shape = shape_internal(tensor1, optimize: optimize); - if (dtype == TF_DataType.DtInvalid) - dtype = tensor1.dtype; - var ret = ones(ones_shape, dtype: dtype, name: name); - ret.shape = tensor1.shape; - return ret; - }); - } - - public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) - { - dtype = dtype.as_base_dtype(); - return Python.with(new ops.name_scope(name, "ones", new { shape }), scope => - { - name = scope; - var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); - return output; - }); - } - - public static Tensor where(Tensor condition, Tensor x = null, Tensor y = null, string name = null) - { - if( x == null && y == null) - { - throw new NotImplementedException("where"); - } - else if(x != null && y != null) - { - return gen_array_ops.select(condition, x, y, name); - } - else - { - throw new ValueError("x and y must both be non-None or both be None."); - } - } - - /// - /// Returns the shape of a tensor. - /// - /// A `Tensor` or `SparseTensor`. - /// A name for the operation (optional). - /// - /// (Optional) The specified output type of the operation - /// (`int32` or `int64`). Defaults to `tf.int32`. - /// - /// A `Tensor` of type `out_type`. - public static Tensor shape(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32) - { - return shape_internal(input, name, optimize: true, out_type: out_type); - } - - public static Tensor size(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) - { - return size_internal(input, name, optimize: optimize, out_type: out_type); - } - - private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) - { - return Python.with(new ops.name_scope(name, "Shape", new { input }), scope => - { - name = scope; - - if (!tf.context.executing_eagerly()) - { - var input_tensor = ops.convert_to_tensor(input); - var input_shape = tensor_util.to_shape(input_tensor.shape); - if (optimize && input_tensor.NDims > -1 && input_shape.is_fully_defined()) - { - var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype()); - return constant_op.constant(nd, name: name); - } - } - - return gen_array_ops.shape(input, name: name, out_type: out_type); - }); - } - - private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) - { - return Python.with(new ops.name_scope(name, "Size", new Tensor[] { input }), scope => - { - name = scope; - - if (!tf.context.executing_eagerly()) - { - var input_tensor = ops.convert_to_tensor(input); - var input_shape = tensor_util.to_shape(input_tensor.shape); - if (optimize) - { - if (input_shape.is_fully_defined()) - { - var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype()); - return constant_op.constant(nd, name: name); - } - } - - return gen_array_ops.size(input, name: name, out_type: out_type); - } - else - { - // 
result = gen_array_ops.shape(); - throw new NotImplementedException("array_ops.size_internal"); - } - - return null; - }); - } - - public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) - { - return Python.with(new ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => - { - name = scope; - tensor = ops.convert_to_tensor(tensor, name: "tensor"); - - // is_fully_defined return unexpected value. - if (optimize && tensor_util.to_shape(tensor.shape).is_fully_defined() && dtype != TF_DataType.TF_VARIANT) - { - - } - - if(dtype != TF_DataType.DtInvalid && dtype != tensor.dtype && dtype != TF_DataType.TF_VARIANT) - { - throw new NotImplementedException("zeros_like"); - // return zeros(shape_internal(tensor, optimize: optimize), dtype: dtype, name: name); - } - else - { - return gen_array_ops.zeros_like(tensor, name: name); - } - }); - } - } -} +using NumSharp.Core; +using System; +using System.Collections.Generic; +using System.Text; + +namespace Tensorflow +{ + public class array_ops + { + public static Tensor placeholder_with_default(T input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name); + + public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + { + dtype = dtype.as_base_dtype(); + return Python.with(new ops.name_scope(name, "zeros", shape), scope => + { + name = scope; + switch (dtype) + { + case TF_DataType.TF_BOOL: + return _constant_if_small(false, shape, dtype, name); + case TF_DataType.TF_DOUBLE: + return _constant_if_small(0.0D, shape, dtype, name); + case TF_DataType.TF_FLOAT: + return _constant_if_small(0.0F, shape, dtype, name); + case TF_DataType.TF_INT32: + return _constant_if_small(0, shape, dtype, name); + default: + throw new TypeError("can't find type for zeros"); + } + }); + } + + private static Tensor _constant_if_small(T value, Shape shape, TF_DataType dtype, string name) + { + Tensor tShape = null; + if (shape.Size < 1000) + { + return constant_op.constant(value, shape: shape, dtype: dtype, name: name); + } + else + { + tShape = constant_op._tensor_shape_tensor_conversion_function(shape.as_shape()); + var c = constant_op.constant(0); + return gen_array_ops.fill(tShape, c, name: name); + } + } + + public static Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1) => expand_dims_v2(input, axis, name); + + private static Tensor expand_dims_v2(Tensor input, int axis, string name = null) => gen_array_ops.expand_dims(input, axis, name); + + public static Tensor rank(Tensor input, string name = null) + { + return math_ops.rank_internal(input, name, optimize: true); + } + + /// + /// Creates a tensor with all elements set to 1. 
+ /// + /// + /// + /// + /// + /// + public static Tensor ones_like(T tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) + => ones_like_impl(tensor, dtype, name, optimize); + + private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) + { + return Python.with(new ops.name_scope(name, "ones_like", new { tensor }), scope => + { + name = scope; + var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); + var ones_shape = shape_internal(tensor1, optimize: optimize); + if (dtype == TF_DataType.DtInvalid) + dtype = tensor1.dtype; + var ret = ones(ones_shape, dtype: dtype, name: name); + ret.shape = tensor1.shape; + return ret; + }); + } + + public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + { + dtype = dtype.as_base_dtype(); + return Python.with(new ops.name_scope(name, "ones", new { shape }), scope => + { + name = scope; + var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); + return output; + }); + } + + public static Tensor where(Tensor condition, Tensor x = null, Tensor y = null, string name = null) + { + if( x == null && y == null) + { + throw new NotImplementedException("where"); + } + else if(x != null && y != null) + { + return gen_array_ops.select(condition, x, y, name); + } + else + { + throw new ValueError("x and y must both be non-None or both be None."); + } + } + + /// + /// Returns the shape of a tensor. + /// + /// A `Tensor` or `SparseTensor`. + /// A name for the operation (optional). + /// + /// (Optional) The specified output type of the operation + /// (`int32` or `int64`). Defaults to `tf.int32`. + /// + /// A `Tensor` of type `out_type`. + public static Tensor shape(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32) + { + return shape_internal(input, name, optimize: true, out_type: out_type); + } + + public static Tensor size(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) + { + return size_internal(input, name, optimize: optimize, out_type: out_type); + } + + private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) + { + return Python.with(new ops.name_scope(name, "Shape", new { input }), scope => + { + name = scope; + + if (!tf.context.executing_eagerly()) + { + var input_tensor = ops.convert_to_tensor(input); + var input_shape = tensor_util.to_shape(input_tensor.shape); + if (optimize && input_tensor.NDims > -1 && input_shape.is_fully_defined()) + { + var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype()); + return constant_op.constant(nd, name: name); + } + } + + return gen_array_ops.shape(input, name: name, out_type: out_type); + }); + } + + private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) + { + return Python.with(new ops.name_scope(name, "Size", new Tensor[] { input }), scope => + { + name = scope; + + if (!tf.context.executing_eagerly()) + { + var input_tensor = ops.convert_to_tensor(input); + var input_shape = tensor_util.to_shape(input_tensor.shape); + if (optimize) + { + if (input_shape.is_fully_defined()) + { + var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype()); + return constant_op.constant(nd, name: name); + } + } + + return gen_array_ops.size(input, name: name, out_type: out_type); + } + else + { + // 
result = gen_array_ops.shape(); + throw new NotImplementedException("array_ops.size_internal"); + } + + return null; + }); + } + + public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) + { + return Python.with(new ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => + { + name = scope; + tensor = ops.convert_to_tensor(tensor, name: "tensor"); + + // is_fully_defined return unexpected value. + if (optimize && tensor_util.to_shape(tensor.shape).is_fully_defined() && dtype != TF_DataType.TF_VARIANT) + { + + } + + if(dtype != TF_DataType.DtInvalid && dtype != tensor.dtype && dtype != TF_DataType.TF_VARIANT) + { + throw new NotImplementedException("zeros_like"); + // return zeros(shape_internal(tensor, optimize: optimize), dtype: dtype, name: name); + } + else + { + return gen_array_ops.zeros_like(tensor, name: name); + } + }); + } + + /// + /// When building ops to compute gradients, this op prevents the contribution of + /// its inputs to be taken into account.Normally, the gradient generator adds ops /// to a graph to compute the derivatives of a specified 'loss' by recursively /// finding out inputs that contributed to its computation.If you insert this op /// in the graph it inputs are masked from the gradient generator. They are not + /// taken into account for computing gradients. + /// + /// + /// + /// + public static Tensor stop_gradient(Tensor input, string name = null) + { + return gen_array_ops.stop_gradient(input, name); + } + + /// + /// Removes dimensions of size 1 from the shape of a tensor. + /// Given a tensor `input`, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed.If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// `axis`. + /// + /// A `Tensor`. The `input` to squeeze. + /// An optional list of `ints`. Defaults to `[]`. + /// If specified, only squeezes the dimensions listed.The dimension + /// index starts at 0. It is an error to squeeze a dimension that is not 1. + /// Must be in the range `[-rank(input), rank(input))`. + /// A name for the operation (optional). + /// Deprecated keyword argument that is now axis. + /// A `Tensor`. Has the same type as `input`. + /// Contains the same data as `input`, but has one or more dimensions of + /// size 1 removed. + public static Tensor squeeze(Tensor input, int[] axis = null, string name = null, int[] squeeze_dims = null) + { + return gen_array_ops.squeeze(input, axis, name); + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 76122991..2a4617e4 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -155,5 +155,28 @@ namespace Tensorflow var _op = _op_def_lib._apply_op_helper("ZerosLike", name, new { x }); return _op.outputs[0]; } + public static Tensor stop_gradient(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("StopGradient", name, args: new { x }); + + return _op.outputs[0]; + } + /// + /// Removes dimensions of size 1 from the shape of a tensor. + /// Given a tensor `input`, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed.If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// `axis`. + /// + /// A `Tensor`. The `input` to squeeze. 
+ /// An optional list of `ints`. Defaults to `[]`. If specified, only squeezes the dimensions listed. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `input`. + public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Squeeze", name, args: new { input, axis, name }); + + return _op.outputs[0]; + } } } diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index b36cb9d9..24d7607b 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -1,128 +1,152 @@ -using NumSharp.Core; -using System; -using System.Collections.Generic; -using System.IO; -using System.Text; - -namespace Tensorflow -{ - public static class gen_math_ops - { - public static OpDefLibrary _op_def_lib = new OpDefLibrary(); - - public static Tensor add(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "") - { - var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); - - return _op.outputs[0]; - } - - public static Tensor neg(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor sub(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor mul(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor real_div(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor floor_mod(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor floor_div(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); - - return _op.outputs[0]; - } - - /// - /// Multiply the matrix "a" by the matrix "b". - /// - /// - /// - /// - /// - /// - /// - public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); - - return _op.outputs[0]; - } - - /// - /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. - /// - /// - /// - /// - /// - public static Tensor maximum(T1 x, T2 y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor pow(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor sum(Tensor input, Tensor axis = null, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - /// - /// Creates a sequence of numbers. 
- /// - /// - /// - /// - /// - /// - public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); - - return _op.outputs[0]; - } - } -} +using NumSharp.Core; +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +namespace Tensorflow +{ + public static class gen_math_ops + { + public static OpDefLibrary _op_def_lib = new OpDefLibrary(); + /// + /// Computes the mean of elements across dimensions of a tensor. + /// Reduces `input` along the dimensions given in `axis`. Unless /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. + /// + /// A `Tensor`. Must be one of the following types: + /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + /// The tensor to reduce. + /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. + /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `input`. + public static Tensor mean(Tensor input, Tensor axis, bool keep_dims= false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, axis }); + + return _op.outputs[0]; + } + + public static Tensor add(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor squared_difference(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "") + { + var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); + + return _op.outputs[0]; + } + + public static Tensor neg(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor sub(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor mul(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor real_div(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor floor_mod(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor floor_div(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); + + return _op.outputs[0]; + } + + /// + /// Multiply the matrix "a" by the matrix "b". 
+ /// + /// + /// + /// + /// + /// + /// + public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); + + return _op.outputs[0]; + } + + /// + /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. + /// + /// + /// + /// + /// + public static Tensor maximum(T1 x, T2 y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor pow(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor sum(Tensor input, Tensor axis = null, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + /// + /// Creates a sequence of numbers. + /// + /// + /// + /// + /// + /// + public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); + + return _op.outputs[0]; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/math_ops.py.cs b/src/TensorFlowNET.Core/Operations/math_ops.py.cs index 83ee4719..ed952e53 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.py.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.py.cs @@ -24,33 +24,34 @@ namespace Tensorflow }); } /// - /// + /// Computes the mean of elements across dimensions of a tensor. + /// Reduces `input_tensor` along the dimensions given in `axis`. + /// Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each + /// entry in `axis`. If `keepdims` is true, the reduced dimensionsare retained with length 1. + /// If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. /// - /// - /// - /// - /// - public static Tensor reduce_mean(Tensor input_tensor, int[] axes = null, bool keepdims = false, string name = null) + /// The tensor to reduce. Should have numeric type. + /// The dimensions to reduce. If `None` (the default), reduces all + /// dimensions.Must be in the range `[-rank(input_tensor), rank(input_tensor))`. + /// If true, retains reduced dimensions with length 1. + /// A name for the operation (optional). + public static Tensor reduce_mean(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) { - throw new NotFiniteNumberException(); + var r = _ReductionDims(input_tensor, new Tensor(axis)); + var m = gen_math_ops.mean(input_tensor, r); + return _may_reduce_to_scalar(keepdims, m); } /// - /// Reduction Operation + /// Returns (x - y)(x - y) element-wise. /// - /// - /// - /// - public void _ReductionDims(Tensor x, int[] axis, int[] reduction_indices = null) + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. + /// A `Tensor`. Must have the same type as `x`. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `x`. 
+        public static Tensor square_difference(Tensor x, Tensor y, string name = null)
         {
-            if (reduction_indices != null || reduction_indices.Length != 0)
-            {
-                if (axis != null)
-                {
-
-                }
-            }
-
-            throw new NotSupportedException("Can't specify both axis' and 'reduction_indices'.");
+            var m = gen_math_ops.squared_difference(x, y, name);
+            return m;
         }
         /// 
diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
index 56c4fb46..3005863c 100644
--- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
+++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
@@ -9,24 +9,46 @@ namespace Tensorflow
         /// 
         /// Calculate the mean and variance of `x`
         /// 
-        /// 
-        /// 
-        /// 
-        /// 
-        /// 
-        public (Tensor, Tensor) moments(Tensor x,
+        /// A `Tensor`.
+        /// Array of ints. Axes along which to compute mean and variance.
+        /// Name used to scope the operations that compute the moments.
+        /// Produce moments with the same dimensionality as the input.
+        /// Two `Tensor` objects: `mean` and `variance`.
+        public Tuple<Tensor, Tensor> moments(Tensor x,
             int[] axes,
             string name = null,
             bool keep_dims = false)
         {
+            Tuple<Tensor, Tensor> t = null;
             with(new ops.name_scope(name, "moments", new { x, axes }), scope =>
             {
+                // The dynamic range of fp16 is too limited to support the collection of
+                // sufficient statistics. As a workaround we simply perform the operations
+                // on 32-bit floats before converting the mean and variance back to fp16.
                 var y = math_ops.cast(x, TF_DataType.TF_FLOAT);
-                // mean = math_ops.reduce_mean(y, axes, keepdims = True, name = "mean")
-
+                // Compute the true mean while keeping the dims for proper broadcasting.
+                var mean = math_ops.reduce_mean(y, axes, keepdims: true, name: "mean");
+                // Sample variance, not unbiased variance.
+                // Note: stop_gradient does not change the gradient that gets
+                // backpropagated to the mean from the variance calculation,
+                // because that gradient is zero.
+                var variance = math_ops.reduce_mean(math_ops.square_difference(y, array_ops.stop_gradient(mean)), axes, keepdims: true, name: "Variance");
+                if (!keep_dims)
+                {
+                    mean = array_ops.squeeze(mean, axes);
+                    variance = array_ops.squeeze(variance, axes);
+                }
+                // TODO: when x.dtype == dtypes.float16 is supported, cast the moments back to fp16.
+                if (x.dtype == TF_DataType.TF_FLOAT)
+                {
+                    t = Tuple.Create(math_ops.cast(mean, x.dtype), math_ops.cast(variance, x.dtype));
+                    return;
+                }
+                else
+                {
+                    t = Tuple.Create(mean, variance);
+                }
             });
-
-            throw new NotImplementedException("");
+            return t;
         }
     }
 }
diff --git a/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs b/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
index 948b9fff..6d6c02ab 100644
--- a/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
+++ b/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
@@ -19,20 +19,41 @@ namespace TensorFlowNET.Examples
         public void fit(NDArray X, NDArray y)
         {
-            // separate training points by class
-            // shape : nb_class * nb_samples * nb_features
             NDArray unique_y = y.unique();
-            NDArray points_by_class = np.array(y.Data().Where(ys => unique_y.Data().Contains(ys)));
-
-            foreach (long cls in unique_y)
+
+            Dictionary<int, List<double>> dic = new Dictionary<int, List<double>>();
+            // Initialize an empty sample list for each class label.
+            foreach (int uy in unique_y.Data<int>())
             {
-
+                dic.Add(uy, new List<double>());
+            }
+            // Separate training points by class
+            // Shape : nb_classes * nb_samples * nb_features
+            int maxCount = 0;
+            foreach (var (x, t) in zip(X.Data(), y.Data()))
+            {
+                int curClass = (y[t, 0] as NDArray).Data<int>().First();
+                List<double> l = dic[curClass];
+                l.Add(x);
+                if (l.Count > maxCount)
+                {
+                    maxCount = l.Count;
+                }
+                dic[curClass] = l;
+            }
+            NDArray points_by_class = np.zeros(dic.Count, maxCount, X.shape[1]);
+            int i = 0;
+            foreach (KeyValuePair<int, List<double>> kv in dic)
+            {
+                // Copy each class's collected samples into its slice; shorter classes keep their zero padding.
+                var cls = np.array(kv.Value.ToArray());
+                points_by_class[i] = cls;
+                i++;
             }
-
             // estimate mean and variance for each class / feature
             // shape : nb_classes * nb_features
-
+            var cons = tf.constant(points_by_class);
         }
     }
 }
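
A minimal usage sketch of the ops this patch introduces (reduce_mean, squeeze, stop_gradient), mirroring the steps nn_impl.moments now performs internally. It is illustrative only and not part of the diff: it assumes np.zeros and tf.constant behave as they are used in NaiveBayesClassifier above, and the names MomentsSketch, data, x, mean, squeezed, and frozen are invented for the example.

using NumSharp.Core;
using Tensorflow;

public class MomentsSketch
{
    public static void Build()
    {
        // Placeholder (2, 3) input, built the same way NaiveBayesClassifier builds points_by_class.
        var data = np.zeros(2, 3);
        var x = tf.constant(data);

        // reduce_mean added in math_ops.py.cs: mean over axis 0, keeping the reduced dimension.
        var mean = math_ops.reduce_mean(x, new int[] { 0 }, keepdims: true);

        // squeeze added in array_ops.py.cs: drop the kept size-1 dimension again.
        var squeezed = array_ops.squeeze(mean, new int[] { 0 });

        // stop_gradient added in array_ops.py.cs: block gradient flow through the mean,
        // as moments does before computing the variance term.
        var frozen = array_ops.stop_gradient(squeezed);
    }
}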