| @@ -36,6 +36,16 @@ namespace Tensorflow | |||
/// <summary>
/// Inserts a dimension of size 1 into the tensor's shape.
/// </summary>
/// <param name="input">The tensor to expand.</param>
/// <param name="axis">Position at which the new axis is inserted (default -1).</param>
/// <param name="name">Optional name for the operation.</param>
/// <param name="dim">Legacy alternative to <paramref name="axis"/>; forwarded unchanged.</param>
/// <returns>A tensor with one additional dimension of size 1.</returns>
public static Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1)
{
    // Thin façade over the array_ops implementation.
    return array_ops.expand_dims(input, axis, name, dim);
}
/// <summary>
/// Creates a tensor filled with a scalar value.
/// </summary>
/// <param name="dims">1-D tensor describing the shape of the output tensor.</param>
/// <param name="value">The scalar value every element of the output is set to.</param>
/// <param name="name">Optional name for the operation.</param>
/// <returns>A tensor of shape <paramref name="dims"/> filled with <paramref name="value"/>.</returns>
public static Tensor fill<T>(Tensor dims, T value, string name = null)
    => gen_array_ops.fill(dims, value, name: name);
| /// <summary> | |||
| /// Return the elements, either from `x` or `y`, depending on the `condition`. | |||
| /// </summary> | |||
| @@ -6,7 +6,7 @@ namespace Tensorflow | |||
| { | |||
| public static partial class tf | |||
| { | |||
| public static object gradients(Tensor[] ys, | |||
| public static Tensor[] gradients(Tensor[] ys, | |||
| Tensor[] xs, | |||
| Tensor[] grad_ys = null, | |||
| string name = "gradients", | |||
| @@ -41,5 +41,23 @@ namespace Tensorflow | |||
| gate_gradients, | |||
| stop_gradients: stop_gradients); | |||
| } | |||
/// <summary>
/// Constructs symbolic derivatives of <paramref name="ys"/> with respect to
/// <paramref name="xs"/>. Single-tensor convenience overload: wraps both
/// arguments in one-element arrays and delegates to the gradients helper.
/// Note: <paramref name="aggregation_method"/> is accepted but not forwarded.
/// </summary>
public static Tensor[] gradients(Tensor ys,
    Tensor xs,
    Tensor[] grad_ys = null,
    string name = "gradients",
    bool colocate_gradients_with_ops = false,
    bool gate_gradients = false,
    int? aggregation_method = null,
    Tensor[] stop_gradients = null)
{
    var ys_list = new Tensor[] { ys };
    var xs_list = new Tensor[] { xs };
    return gradients_util._GradientsHelper(ys_list,
        xs_list,
        grad_ys,
        name,
        colocate_gradients_with_ops,
        gate_gradients,
        stop_gradients: stop_gradients);
}
| } | |||
| } | |||
| @@ -257,6 +257,16 @@ namespace Tensorflow | |||
/// <summary>
/// Computes the numerical negative value element-wise (y = -x).
/// </summary>
/// <param name="x">The tensor to negate.</param>
/// <param name="name">Optional name for the operation.</param>
/// <returns>A tensor holding the element-wise negation of <paramref name="x"/>.</returns>
public static Tensor negative(Tensor x, string name = null)
{
    return gen_math_ops.neg(x, name);
}
/// <summary>
/// Divides x / y elementwise (using Python 2 division operator semantics).
/// </summary>
/// <param name="x">Numerator tensor.</param>
/// <param name="y">Denominator tensor.</param>
/// <param name="name">Optional name for the operation.</param>
/// <returns>The element-wise quotient <c>x / y</c> (true division for floating
/// types, floor division for integers — see <c>math_ops.div</c>).</returns>
public static Tensor div(Tensor x, Tensor y, string name = null)
    => math_ops.div(x, y, name: name);
/// <summary>
/// Divides <paramref name="x"/> element-wise by an array, converting the array
/// to a tensor with the same base dtype as <paramref name="x"/> first.
/// </summary>
public static Tensor divide<T>(Tensor x, T[] y, string name = null) where T : struct
{
    var divisor = ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y");
    return x / divisor;
}
| @@ -168,6 +168,96 @@ namespace Tensorflow.Gradients | |||
| return new Tensor[] { math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), null }; | |||
| } | |||
/// <summary>
/// Gradient for Max. Delegates to the shared min/max reduction gradient.
/// </summary>
/// <param name="op">The forward Max operation.</param>
/// <param name="grads">Incoming gradients, one per output.</param>
/// <returns>Gradients with respect to the op's inputs.</returns>
[RegisterGradient("Max")]
public static Tensor[] _MaxGrad(Operation op, Tensor[] grads)
    => _MinOrMaxGrad(op, grads);
/// <summary>
/// Gradient for Min. Delegates to the shared min/max reduction gradient.
/// </summary>
/// <param name="op">The forward Min operation.</param>
/// <param name="grads">Incoming gradients, one per output.</param>
/// <returns>Gradients with respect to the op's inputs.</returns>
[RegisterGradient("Min")]
public static Tensor[] _MinGrad(Operation op, Tensor[] grads)
    => _MinOrMaxGrad(op, grads);
/// <summary>
/// Shared gradient for the Min and Max reduction ops: routes the incoming
/// gradient to the positions that attained the extremum.
/// </summary>
/// <param name="op">The forward reduction op (inputs: tensor, reduction indices).</param>
/// <param name="grads">Incoming gradients, one per output.</param>
/// <returns>Gradient w.r.t. the input tensor; null for the reduction indices.</returns>
private static Tensor[] _MinOrMaxGrad(Operation op, Tensor[] grads)
{
    var input_shape = array_ops.shape(op.inputs[0]);
    var kept_dims_shape = math_ops.reduced_shape(input_shape, op.inputs[1]);
    // Broadcast the reduced output and the incoming gradient back over the
    // reduced dimensions (as size-1 axes).
    var outputs = array_ops.reshape(op.outputs[0], kept_dims_shape);
    var incoming = array_ops.reshape(grads[0], kept_dims_shape);
    // Compute the number of selected (maximum or minimum) elements in each
    // reduction dimension. If there are multiple minimum or maximum elements
    // then the gradient will be divided between them.
    var indicators = math_ops.cast(math_ops.equal(outputs, op.inputs[0]), incoming.dtype);
    var num_selected = array_ops.reshape(math_ops.reduce_sum(indicators, op.inputs[1]), kept_dims_shape);
    return new Tensor[] { math_ops.div(indicators, num_selected) * incoming, null };
}
/// <summary>
/// Returns grad*(x > y, x <= y) with type of grad.
/// </summary>
/// <param name="op">The forward Maximum operation.</param>
/// <param name="grads">Incoming gradients, one per output.</param>
/// <returns>Gradients with respect to x and y.</returns>
[RegisterGradient("Maximum")]
public static Tensor[] _MaximumGrad(Operation op, Tensor[] grads)
    => _MaximumMinimumGrad(op, grads[0]);
/// <summary>
/// Returns grad*(x < y, x >= y) with type of grad.
/// </summary>
/// <param name="op">The forward Minimum operation.</param>
/// <param name="grads">Incoming gradients, one per output.</param>
/// <returns>Gradients with respect to x and y.</returns>
[RegisterGradient("Minimum")]
public static Tensor[] _MinimumGrad(Operation op, Tensor[] grads)
    => _MaximumMinimumGrad(op, grads[0]);
/// <summary>
/// Factor out the code for the gradient of Maximum or Minimum: the gradient
/// flows to whichever operand was selected, with broadcast dimensions reduced away.
/// NOTE(review): both callers pass through the same `x >= y` mask, which matches
/// Maximum; for Minimum the caller relies on this shared routine as-is.
/// </summary>
/// <param name="op">The forward Maximum/Minimum op.</param>
/// <param name="grad">The incoming gradient for the op's single output.</param>
/// <returns>Gradients with respect to the two operands.</returns>
private static Tensor[] _MaximumMinimumGrad(Operation op, Tensor grad)
{
    var lhs = op.inputs[0];
    var rhs = op.inputs[1];
    var grad_dtype = grad.dtype;
    var lhs_shape = array_ops.shape(lhs);
    var rhs_shape = array_ops.shape(rhs);
    var grad_shape = array_ops.shape(grad);
    var zero_fill = array_ops.zeros(grad_shape, grad_dtype);
    // Positions where the left operand was the one selected by the op.
    var lhs_selected = gen_math_ops.greater_equal(lhs, rhs);
    // Axes along which each operand was broadcast, so we can sum them back out.
    var (lhs_reduce, rhs_reduce) = gen_array_ops.broadcast_gradient_args(lhs_shape, rhs_shape);
    var lhs_grad = array_ops.where(lhs_selected, grad, zero_fill);
    var rhs_grad = array_ops.where(lhs_selected, zero_fill, grad);
    var gx = array_ops.reshape(math_ops.reduce_sum(lhs_grad, lhs_reduce), lhs_shape);
    var gy = array_ops.reshape(math_ops.reduce_sum(rhs_grad, rhs_reduce), rhs_shape);
    return new Tensor[] { gx, gy };
}
| [RegisterGradient("Neg")] | |||
| public static Tensor[] _NegGrad(Operation op, Tensor[] grads) | |||
| { | |||
| @@ -36,6 +36,29 @@ namespace Tensorflow | |||
| }); | |||
| } | |||
/// <summary>
/// Creates a tensor of the given shape with every element set to zero.
/// Supported dtypes are bool, double, float and int32; anything else raises.
/// </summary>
/// <param name="shape">1-D tensor describing the output shape.</param>
/// <param name="dtype">Element type of the result (default float).</param>
/// <param name="name">Optional name for the operation.</param>
/// <returns>A tensor of the requested shape filled with zeros.</returns>
public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
    dtype = dtype.as_base_dtype();
    return with(ops.name_scope(name, "zeros", shape), scope =>
    {
        name = scope;
        // Pick the zero constant matching the requested dtype, then fill.
        Tensor zero;
        switch (dtype)
        {
            case TF_DataType.TF_BOOL:
                zero = tf.constant(false, dtype: dtype);
                break;
            case TF_DataType.TF_DOUBLE:
                zero = tf.constant(0.0D, dtype: dtype);
                break;
            case TF_DataType.TF_FLOAT:
                zero = tf.constant(0.0F, dtype: dtype);
                break;
            case TF_DataType.TF_INT32:
                zero = tf.constant(0, dtype: dtype);
                break;
            default:
                throw new TypeError("can't find type for zeros");
        }
        return gen_array_ops.fill(shape, zero, name: name);
    });
}
| private static Tensor _constant_if_small(int value, Tensor shape) | |||
| { | |||
| return shape < 1000; | |||
| @@ -65,6 +65,31 @@ namespace Tensorflow | |||
| }); | |||
| } | |||
/// <summary>
/// Divide two values using Python 2 semantics. Used for Tensor.__div__.
/// </summary>
/// <param name="x">`Tensor` numerator of real numeric type.</param>
/// <param name="y">`Tensor` denominator of real numeric type.</param>
/// <param name="name">A name for the operation</param>
/// <returns>`x / y` returns the quotient of x and y.</returns>
public static Tensor div(Tensor x, Tensor y, string name = null)
{
    return with(ops.name_scope(name, "div", (x, y)), name_scope =>
    {
        name = name_scope;
        x = ops.convert_to_tensor(x, name: "x");
        // BUGFIX: this was `name = "y"`, an assignment expression that clobbered
        // the local `name` (just set to the scope name, so the ops below were
        // named "y") and passed the value positionally. Use a named argument.
        y = ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y");
        var x_dtype = x.dtype.as_base_dtype();
        var y_dtype = y.dtype.as_base_dtype();
        if (x_dtype != y_dtype)
            throw new TypeError($"x and y must have the same dtype, got {x_dtype} != {y_dtype}");
        // Python 2 semantics: true division for floating/complex operands,
        // floor division for everything else (integers).
        if (x_dtype.is_floating() || x_dtype.is_complex())
            return gen_math_ops.real_div(x, y, name: name);
        else
            return gen_math_ops.floor_div(x, y, name: name);
    });
}
| /// <summary> | |||
| /// Returns 0 if the denominator is zero. | |||
| /// </summary> | |||