@@ -36,8 +36,12 @@ namespace Tensorflow
         /// <param name="input"></param>
         /// <param name="axis"></param>
         /// <returns></returns>
-        public static Tensor reduce_sum(Tensor input, int[] axis = null, int? reduction_indices = null)
-            => math_ops.reduce_sum(input);
+        public static Tensor reduce_sum(Tensor input, int? axis = null, int? reduction_indices = null)
+        {
+            if (!axis.HasValue && reduction_indices.HasValue)
+                return math_ops.reduce_sum(input, reduction_indices.Value);
+            return math_ops.reduce_sum(input);
+        }
 
         public static Tensor reduce_mean(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
             => math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices);
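This is the overload the logistic-regression example further down relies on when it passes `reduction_indices: 1`. A minimal usage sketch, assuming the same static `tf` surface used in that example (the placeholder shape is hypothetical):

    // Sketch: sum a [batch, 10] tensor over its second axis via the legacy alias.
    // Note: this overload only consults reduction_indices when axis is null.
    var y = tf.placeholder(tf.float32, new TensorShape(-1, 10));
    var rowSum = tf.reduce_sum(y, reduction_indices: 1);   // dispatches to math_ops.reduce_sum(y, 1)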
@@ -207,6 +207,13 @@ namespace Tensorflow
             return _op.outputs[0];
         }
 
+        public static Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null)
+        {
+            var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
+
+            return _op.outputs[0];
+        }
+
         /// <summary>
         /// Creates a sequence of numbers.
         /// </summary>
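The new wrapper follows the pattern of the other generated ops in this file: inputs and attributes go to `_op_def_lib._apply_op_helper` under the op name "Sum", and the node's first output comes back. A hedged usage sketch, with `input` standing in for some existing Tensor:

    // Given a Tensor `input` (hypothetical), build a "Sum" node over axis 1.
    // axis is handed to the helper as reduction_indices; keep_dims rides along with it.
    var rowTotals = gen_math_ops.sum(input, axis: 1, keep_dims: false, name: "row_sum");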
@@ -209,6 +209,12 @@ namespace Tensorflow
             return _may_reduce_to_scalar(keepdims, m);
         }
 
+        public static Tensor reduce_sum(Tensor input_tensor, int axis, bool keepdims = false)
+        {
+            var m = gen_math_ops.sum(input_tensor, axis);
+            return _may_reduce_to_scalar(keepdims, m);
+        }
+
         private static Tensor _may_reduce_to_scalar(bool keepdims, Tensor output)
         {
             output.shape = new long[0];
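One thing this overload does not do is forward `keepdims` to `gen_math_ops.sum`, which already exposes a `keep_dims` parameter. If that behaviour is wanted later, a follow-up could look like this (sketch only, not part of this diff):

    public static Tensor reduce_sum(Tensor input_tensor, int axis, bool keepdims = false)
    {
        // Hypothetical variant: pass keepdims through to the "Sum" op itself.
        var m = gen_math_ops.sum(input_tensor, axis, keep_dims: keepdims);
        return _may_reduce_to_scalar(keepdims, m);
    }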
@@ -233,7 +239,7 @@ namespace Tensorflow
                 return range(0, rank, 1);
             }
         }
 
         private static Tensor _ReductionDims(Tensor x, int[] axis)
         {
             if (axis != null)
@@ -26,7 +26,7 @@ namespace TensorFlowNET.Examples
         private void PrepareData()
         {
-            var mnist = MnistDataSet.read_data_sets("logistic_regression", one_hot: true);
+            //var mnist = MnistDataSet.read_data_sets("logistic_regression", one_hot: true);
 
             // tf Graph Input
             var x = tf.placeholder(tf.float32, new TensorShape(-1, 784)); // mnist data image of shape 28*28=784
@@ -40,8 +40,11 @@ namespace TensorFlowNET.Examples
             var pred = tf.nn.softmax(tf.matmul(x, W) + b); // Softmax
 
             // Minimize error using cross entropy
-            var sum = -tf.reduce_sum(y * tf.log(pred), reduction_indices: 1);
-            var cost = tf.reduce_mean(sum);
+            var log = tf.log(pred);
+            var mul = y * log;
+            var sum = tf.reduce_sum(mul, reduction_indices: 1);
+            var neg = -sum;
+            var cost = tf.reduce_mean(neg);
 
             // Gradient Descent
             var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
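The rewritten block computes the same value as the two lines it replaces: the categorical cross-entropy cost = mean over the batch of -sum_j y[i, j] * log(pred[i, j]), just with each intermediate operation bound to its own local (log, mul, sum, neg). For reference, the compact form still goes through the same new reduce_sum path (the name `cost2` is hypothetical):

    // Equivalent one-liner; relies on the reduce_sum(reduction_indices:) overload added above.
    var cost2 = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));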