diff --git a/src/TensorFlowNET.Core/APIs/tf.queue.cs b/src/TensorFlowNET.Core/APIs/tf.queue.cs
index a5dfac15..f81f5726 100644
--- a/src/TensorFlowNET.Core/APIs/tf.queue.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.queue.cs
@@ -52,7 +52,6 @@ namespace Tensorflow
             => new PaddingFIFOQueue(capacity,
                 new [] { dtype },
                 new[] { shape },
-                new[] { name },
                 shared_name: shared_name,
                 name: name);
 
diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
index 53ee9699..7b5d2ea7 100644
--- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
@@ -47,6 +47,15 @@ namespace Tensorflow.Gradients
             return new Tensor[] { gen_nn_ops.relu_grad(grads[0], op.outputs[0]) };
         }
 
+        [RegisterGradient("LeakyRelu")]
+        public static Tensor[] _LeakyReluGrad(Operation op, Tensor[] grads)
+        {
+            var grad = grads[0];
+            var x = op.inputs[0];
+            var alpha = (float)op.get_attr("alpha");
+            return new Tensor[] { gen_nn_ops.leaky_relu_grad(grad, x, alpha: alpha)};
+        }
+
         /// <summary>
         /// The derivative of the softmax nonlinearity.
         /// </summary>
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
index c79f89b2..82085683 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
@@ -284,6 +284,18 @@ namespace Tensorflow.Operations
             });
 
             return _op.outputs[0];
+        }
+
+        public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null)
+        {
+            var _op = _op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new
+            {
+                gradients,
+                features,
+                alpha
+            });
+
+            return _op.output;
         }
 
         public static Tensor softmax(Tensor logits, string name = null)