diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs
new file mode 100644
index 00000000..d3e120e9
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs
@@ -0,0 +1,39071 @@
+using System;
+using System.Linq;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Operations
+{
+ public class gen_ops
+ {
+ static readonly OpDefLibrary _op_def_lib;
+ static gen_ops() { _op_def_lib = new OpDefLibrary(); }
+
+ ///
+ /// Raise an exception to abort the process when called.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Abort'.
+ ///
+ ///
+ /// A string which is the message associated with the exception.
+ ///
+ ///
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// If exit_without_error is true, the process will exit normally,
+ /// otherwise it will exit with a SIGABORT signal.
+ ///
+ /// Returns nothing but an exception.
+ ///
+ public static Operation abort (string error_msg = null, bool? exit_without_error = null, string name = "Abort")
+ {
+ var dict = new Dictionary<string, object>();
+ if (error_msg != null)
+ dict["error_msg"] = error_msg;
+ if (exit_without_error.HasValue)
+ dict["exit_without_error"] = exit_without_error.Value;
+ var op = _op_def_lib._apply_op_helper("Abort", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Computes the absolute value of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Abs'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor x, this operation returns a tensor containing the absolute
+ /// value of each element in x. For example, if x is an input element and y is
+ /// an output element, this operation computes \\(y = |x|\\).
+ ///
+ public static Tensor abs (Tensor x, string name = "Abs")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Abs", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns the element-wise sum of a list of tensors.
+ ///
+ ///
+ /// A list of Tensor objects, each with same shape and type.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AccumulateNV2'.
+ ///
+ ///
+ /// Optional argument
+ /// Shape of elements of inputs.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// tf.accumulate_n_v2 performs the same operation as tf.add_n, but does not
+ /// wait for all of its inputs to be ready before beginning to sum. This can
+ /// save memory if inputs are ready at different times, since minimum temporary
+ /// storage is proportional to the output size rather than the inputs size.
+ ///
+ /// Unlike the original accumulate_n, accumulate_n_v2 is differentiable.
+ ///
+ /// Returns a Tensor of same shape and type as the elements of inputs.
+ ///
+ public static Tensor accumulate_n_v2 (Tensor[] inputs, TensorShape shape, string name = "AccumulateNV2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["inputs"] = inputs;
+ dict["shape"] = shape;
+ var op = _op_def_lib._apply_op_helper("AccumulateNV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Applies a gradient to a given accumulator.
+ ///
+ ///
+ /// The handle to an accumulator.
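+ /// A handle is typically produced by a conditional-accumulator op created
+ /// earlier in the graph; a hedged sketch, with the wrapper name inferred from
+ /// this file's naming pattern rather than shown in this hunk:
+ ///
+ ///     Tensor handle = gen_ops.conditional_accumulator(TF_DataType.TF_FLOAT, shape);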
+ /// + /// + /// The local_step value at which the gradient was computed. + /// + /// + /// A tensor of the gradient to be accumulated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AccumulatorApplyGradient'. + /// + /// + /// Returns the description of the operation + /// + /// + /// Does not add if local_step is lesser than the accumulator's global_step. + /// + public static Operation accumulator_apply_gradient (Tensor handle, Tensor local_step, Tensor gradient, string name = "AccumulatorApplyGradient") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["local_step"] = local_step; + dict["gradient"] = gradient; + var op = _op_def_lib._apply_op_helper("AccumulatorApplyGradient", name: name, keywords: dict); + return op; + } + + /// + /// Returns the number of gradients aggregated in the given accumulators. + /// + /// + /// The handle to an accumulator. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AccumulatorNumAccumulated'. + /// + /// + /// The number of gradients aggregated in the given accumulator. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor accumulator_num_accumulated (Tensor handle, string name = "AccumulatorNumAccumulated") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("AccumulatorNumAccumulated", name: name, keywords: dict); + return op.output; + } + + /// + /// Updates the accumulator with a new value for global_step. + /// + /// + /// The handle to an accumulator. + /// + /// + /// The new global_step value to set. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AccumulatorSetGlobalStep'. + /// + /// + /// Returns the description of the operation + /// + /// + /// Logs warning if the accumulator's value is already higher than + /// new_global_step. + /// + public static Operation accumulator_set_global_step (Tensor handle, Tensor new_global_step, string name = "AccumulatorSetGlobalStep") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["new_global_step"] = new_global_step; + var op = _op_def_lib._apply_op_helper("AccumulatorSetGlobalStep", name: name, keywords: dict); + return op; + } + + /// + /// Extracts the average gradient in the given ConditionalAccumulator. + /// + /// + /// The handle to an accumulator. + /// + /// + /// Number of gradients required before we return an aggregate. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AccumulatorTakeGradient'. + /// + /// + /// Optional argument + /// The data type of accumulated gradients. Needs to correspond to the type + /// of the accumulator. + /// + /// + /// The average of the accumulated gradients. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The op blocks until sufficient (i.e., more than num_required) + /// gradients have been accumulated. If the accumulator has already + /// aggregated more than num_required gradients, it returns the average of + /// the accumulated gradients. Also automatically increments the recorded + /// global_step in the accumulator by 1, and resets the aggregate to 0. 
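+ /// A minimal accumulate-then-read sketch (hedged: handle, local_step,
+ /// num_required and the gradient tensors are assumed to be built elsewhere in
+ /// the graph; names are illustrative):
+ ///
+ ///     gen_ops.accumulator_apply_gradient(handle, local_step, grad0);
+ ///     gen_ops.accumulator_apply_gradient(handle, local_step, grad1);
+ ///     // blocks until num_required gradients have arrived, then averages them
+ ///     Tensor avg = gen_ops.accumulator_take_gradient(handle, num_required, TF_DataType.TF_FLOAT);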
+ /// + public static Tensor accumulator_take_gradient (Tensor handle, Tensor num_required, TF_DataType dtype, string name = "AccumulatorTakeGradient") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["num_required"] = num_required; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("AccumulatorTakeGradient", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes acos of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Acos'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor acos (Tensor x, string name = "Acos") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Acos", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes inverse hyperbolic cosine of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Acosh'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor acosh (Tensor x, string name = "Acosh") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Acosh", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x + y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Add'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor add (Tensor x, Tensor y, string name = "Add") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Add", name: name, keywords: dict); + return op.output; + } + + /// + /// Add an N-minibatch SparseTensor to a SparseTensorsMap, return N handles. + /// + /// + /// 2-D. The indices of the minibatch SparseTensor. + /// sparse_indices[:, 0] must be ordered values in [0, N). + /// + /// + /// 1-D. The values of the minibatch SparseTensor. + /// + /// + /// 1-D. The shape of the minibatch SparseTensor. + /// The minibatch size N == sparse_shape[0]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AddManySparseToTensorsMap'. + /// + /// + /// The container name for the SparseTensorsMap created by this op. + /// + /// + /// The shared name for the SparseTensorsMap created by this op. + /// If blank, the new Operation's unique name is used. + /// + /// + /// 1-D. The handles of the SparseTensor now stored in the + /// SparseTensorsMap. Shape: [N]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
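+ /// A write-side sketch (hedged: tensor names are illustrative; the matching
+ /// read-side wiring via shared_name is described below):
+ ///
+ ///     Tensor handles = gen_ops.add_many_sparse_to_tensors_map(
+ ///         sparse_indices, sparse_values, sparse_shape, shared_name: "sts_map");
+ ///     // feed rows of 'handles' to a TakeManySparseFromTensorsMap op created
+ ///     // with the same container/shared_name.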
+ /// + /// + /// A SparseTensor of rank R is represented by three tensors: sparse_indices, + /// sparse_values, and sparse_shape, where + /// + /// + /// sparse_indices.shape[1] == sparse_shape.shape[0] == R + /// + /// + /// An N-minibatch of SparseTensor objects is represented as a SparseTensor + /// having a first sparse_indices column taking values between [0, N), where + /// the minibatch size N == sparse_shape[0]. + /// + /// The input SparseTensor must have rank R greater than 1, and the first + /// dimension is treated as the minibatch dimension. Elements of the SparseTensor + /// must be sorted in increasing order of this first dimension. The stored + /// SparseTensor objects pointed to by each row of the output sparse_handles + /// will have rank R-1. + /// + /// The SparseTensor values can then be read out as part of a minibatch by passing + /// the given keys as vector elements to TakeManySparseFromTensorsMap. To ensure + /// the correct SparseTensorsMap is accessed, ensure that the same + /// container and shared_name are passed to that Op. If no shared_name + /// is provided here, instead use the *name* of the Operation created by calling + /// AddManySparseToTensorsMap as the shared_name passed to + /// TakeManySparseFromTensorsMap. Ensure the Operations are colocated. + /// + public static Tensor add_many_sparse_to_tensors_map (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape, string container = null, string shared_name = null, string name = "AddManySparseToTensorsMap") + { + var dict = new Dictionary(); + dict["sparse_indices"] = sparse_indices; + dict["sparse_values"] = sparse_values; + dict["sparse_shape"] = sparse_shape; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("AddManySparseToTensorsMap", name: name, keywords: dict); + return op.output; + } + + /// + /// Add all input tensors element wise. + /// + /// + /// Must all be the same size and shape. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AddN'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor add_n (Tensor[] inputs, string name = "AddN") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + var op = _op_def_lib._apply_op_helper("AddN", name: name, keywords: dict); + return op.output; + } + + /// + /// Add a SparseTensor to a SparseTensorsMap return its handle. + /// + /// + /// 2-D. The indices of the SparseTensor. + /// + /// + /// 1-D. The values of the SparseTensor. + /// + /// + /// 1-D. The shape of the SparseTensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AddSparseToTensorsMap'. + /// + /// + /// The container name for the SparseTensorsMap created by this op. + /// + /// + /// The shared name for the SparseTensorsMap created by this op. + /// If blank, the new Operation's unique name is used. + /// + /// + /// 0-D. The handle of the SparseTensor now stored in the + /// SparseTensorsMap. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// A SparseTensor is represented by three tensors: sparse_indices, + /// sparse_values, and sparse_shape. 
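+ /// For example, the 2x3 matrix [[1, 0, 0], [0, 0, 2]] is encoded as
+ /// (standard SparseTensor layout, values shown for illustration):
+ ///
+ ///     sparse_indices = [[0, 0], [1, 2]]   // row/column of each non-zero entry
+ ///     sparse_values  = [1, 2]
+ ///     sparse_shape   = [2, 3]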
+ /// + /// This operator takes the given SparseTensor and adds it to a container + /// object (a SparseTensorsMap). A unique key within this container is generated + /// in the form of an int64, and this is the value that is returned. + /// + /// The SparseTensor can then be read out as part of a minibatch by passing + /// the key as a vector element to TakeManySparseFromTensorsMap. To ensure + /// the correct SparseTensorsMap is accessed, ensure that the same + /// container and shared_name are passed to that Op. If no shared_name + /// is provided here, instead use the *name* of the Operation created by calling + /// AddSparseToTensorsMap as the shared_name passed to + /// TakeManySparseFromTensorsMap. Ensure the Operations are colocated. + /// + public static Tensor add_sparse_to_tensors_map (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape, string container = null, string shared_name = null, string name = "AddSparseToTensorsMap") + { + var dict = new Dictionary(); + dict["sparse_indices"] = sparse_indices; + dict["sparse_values"] = sparse_values; + dict["sparse_shape"] = sparse_shape; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("AddSparseToTensorsMap", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x + y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AddV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor add_v2 (Tensor x, Tensor y, string name = "AddV2") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("AddV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Disallowed in GraphDef version >= 2. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AdjustContrast'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor adjust_contrast (Tensor images, Tensor contrast_factor, Tensor min_value, Tensor max_value, string name = "AdjustContrast") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["contrast_factor"] = contrast_factor; + dict["min_value"] = min_value; + dict["max_value"] = max_value; + var op = _op_def_lib._apply_op_helper("AdjustContrast", name: name, keywords: dict); + return op.output; + } + + /// + /// Adjust the contrast of one or more images. + /// + /// + /// Images to adjust. At least 3-D. + /// + /// + /// A float multiplier for adjusting contrast. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AdjustContrastv2'. + /// + /// + /// The contrast-adjusted image or images. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// images is a tensor of at least 3 dimensions. The last 3 dimensions are + /// interpreted as [height, width, channels]. 
The other dimensions only
+ /// represent a collection of images, such as [batch, height, width, channels].
+ ///
+ /// Contrast is adjusted independently for each channel of each image.
+ ///
+ /// For each channel, the Op first computes the mean of the image pixels in the
+ /// channel and then adjusts each component of each pixel to
+ /// (x - mean) * contrast_factor + mean.
+ ///
+ public static Tensor adjust_contrastv2 (Tensor images, Tensor contrast_factor, string name = "AdjustContrastv2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["images"] = images;
+ dict["contrast_factor"] = contrast_factor;
+ var op = _op_def_lib._apply_op_helper("AdjustContrastv2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Adjust the hue of one or more images.
+ ///
+ ///
+ /// Images to adjust. At least 3-D.
+ ///
+ ///
+ /// A float delta to add to the hue.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AdjustHue'.
+ ///
+ ///
+ /// The hue-adjusted image or images.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// images is a tensor of at least 3 dimensions. The last dimension is
+ /// interpreted as channels, and must be three.
+ ///
+ /// The input image is considered in the RGB colorspace. Conceptually, the RGB
+ /// colors are first mapped into HSV. A delta is then applied to all the hue values,
+ /// and then remapped back to RGB colorspace.
+ ///
+ public static Tensor adjust_hue (Tensor images, Tensor delta, string name = "AdjustHue")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["images"] = images;
+ dict["delta"] = delta;
+ var op = _op_def_lib._apply_op_helper("AdjustHue", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Adjust the saturation of one or more images.
+ ///
+ ///
+ /// Images to adjust. At least 3-D.
+ ///
+ ///
+ /// A float scale to add to the saturation.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AdjustSaturation'.
+ ///
+ ///
+ /// The saturation-adjusted image or images.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// images is a tensor of at least 3 dimensions. The last dimension is
+ /// interpreted as channels, and must be three.
+ ///
+ /// The input image is considered in the RGB colorspace. Conceptually, the RGB
+ /// colors are first mapped into HSV. A scale is then applied to all the saturation
+ /// values, and then remapped back to RGB colorspace.
+ ///
+ public static Tensor adjust_saturation (Tensor images, Tensor scale, string name = "AdjustSaturation")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["images"] = images;
+ dict["scale"] = scale;
+ var op = _op_def_lib._apply_op_helper("AdjustSaturation", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the "logical and" of elements across dimensions of a tensor.
+ ///
+ ///
+ /// The tensor to reduce.
+ ///
+ ///
+ /// The dimensions to reduce. Must be in the range
+ /// [-rank(input), rank(input)).
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'All'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// The reduced tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
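+ /// For example (sketch: t is a 2x2 bool tensor, axis0/axis1 are scalar int32
+ /// tensors; values illustrative):
+ ///
+ ///     // t = [[true, false], [true, true]]
+ ///     gen_ops.all(t, axis0);   // reduce axis 0 (down columns)  ==>  [true, false]
+ ///     gen_ops.all(t, axis1);   // reduce axis 1 (across rows)   ==>  [false, true]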
+ ///
+ ///
+ /// Reduces input along the dimensions given in axis. Unless
+ /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in
+ /// axis. If keep_dims is true, the reduced dimensions are
+ /// retained with length 1.
+ ///
+ public static Tensor all (Tensor input, Tensor reduction_indices, bool? keep_dims = null, string name = "All")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["reduction_indices"] = reduction_indices;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("All", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Generates labels for candidate sampling with a learned unigram distribution.
+ ///
+ ///
+ /// A batch_size * num_true matrix, in which each row contains the
+ /// IDs of the num_true target_classes in the corresponding original label.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AllCandidateSampler'.
+ ///
+ ///
+ /// Optional argument
+ /// Number of true labels per context.
+ ///
+ ///
+ /// Optional argument
+ /// Number of candidates to produce.
+ ///
+ ///
+ /// Optional argument
+ /// If unique is true, we sample with rejection, so that all sampled
+ /// candidates in a batch are unique. This requires some approximation to
+ /// estimate the post-rejection sampling probabilities.
+ ///
+ ///
+ /// If either seed or seed2 are set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// sampled_candidates : A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// true_expected_count : A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// sampled_expected_count : A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// See explanations of candidate sampling and the data formats at
+ /// go/candidate-sampling.
+ ///
+ /// For each batch, this op picks a single set of sampled candidate labels.
+ ///
+ /// The advantages of sampling candidates per-batch are simplicity and the
+ /// possibility of efficient dense matrix multiplication. The disadvantage is that
+ /// the sampled candidates must be chosen independently of the context and of the
+ /// true labels.
+ ///
+ public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) all_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int? seed = null, int?
seed2 = null, string name = "AllCandidateSampler") + { + var dict = new Dictionary(); + dict["true_classes"] = true_classes; + dict["num_true"] = num_true; + dict["num_sampled"] = num_sampled; + dict["unique"] = unique; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("AllCandidateSampler", name: name, keywords: dict); + int _idx = 0; + var sampled_candidates = op.outputs[_idx++]; + var true_expected_count = op.outputs[_idx++]; + var sampled_expected_count = op.outputs[_idx++]; + return (sampled_candidates, true_expected_count, sampled_expected_count); + } + + /// + /// Returns the argument of a complex number. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Angle'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor input of complex numbers, this operation returns a tensor of + /// type float that is the argument of each element in input. All elements in + /// input must be complex numbers of the form \\(a + bj\\), where *a* + /// is the real part and *b* is the imaginary part. + /// + /// The argument returned by this operation is of the form \\(atan2(b, a)\\). + /// + /// For example: + /// + /// + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.angle(input) ==> [2.0132, 1.056] + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.angle. + /// @end_compatibility + /// + public static Tensor angle (Tensor input, TF_DataType? Tout = null, string name = "Angle") + { + var dict = new Dictionary(); + dict["input"] = input; + if (Tout.HasValue) + dict["Tout"] = Tout.Value; + var op = _op_def_lib._apply_op_helper("Angle", name: name, keywords: dict); + return op.output; + } + + /// + /// A container for an iterator resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AnonymousIterator'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// A handle to the iterator that can be passed to a "MakeIterator" or + /// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents + /// resource sharing by name, and does not keep a reference to the resource + /// container. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor anonymous_iterator (TF_DataType[] output_types, TensorShape[] output_shapes, string name = "AnonymousIterator") + { + var dict = new Dictionary(); + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("AnonymousIterator", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the "logical or" of elements across dimensions of a tensor. + /// + /// + /// The tensor to reduce. + /// + /// + /// The dimensions to reduce. Must be in the range + /// [-rank(input), rank(input)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Any'. + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// The reduced tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Reduces input along the dimensions given in axis. 
Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// axis. If keep_dims is true, the reduced dimensions are + /// retained with length 1. + /// + public static Tensor any (Tensor input, Tensor reduction_indices, bool? keep_dims = null, string name = "Any") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["reduction_indices"] = reduction_indices; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + var op = _op_def_lib._apply_op_helper("Any", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the AdaMax algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAdaMax'. + /// + /// + /// If True, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// v_t <- max(beta2 * v_{t-1}, abs(g)) + /// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + /// + public static Tensor apply_ada_max (Tensor var, Tensor m, Tensor v, Tensor beta1_power, Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ApplyAdaMax") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["v"] = v; + dict["beta1_power"] = beta1_power; + dict["lr"] = lr; + dict["beta1"] = beta1; + dict["beta2"] = beta2; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyAdaMax", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the adadelta scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay factor. Must be a scalar. + /// + /// + /// Constant factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAdadelta'. + /// + /// + /// If True, updating of the var, accum and update_accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
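+ /// A single-step usage sketch (hedged: the first three arguments are
+ /// Variable-backed tensors created elsewhere, the rest are scalar tensors;
+ /// the update applied is the one shown below):
+ ///
+ ///     Tensor updated = gen_ops.apply_adadelta(theVar, accum, accumUpdate,
+ ///         lr, rho, epsilon, grad);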
+ /// + /// + /// accum = rho() * accum + (1 - rho()) * grad.square(); + /// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + /// update_accum = rho() * update_accum + (1 - rho()) * update.square(); + /// var -= update; + /// + public static Tensor apply_adadelta (Tensor var, Tensor accum, Tensor accum_update, Tensor lr, Tensor rho, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ApplyAdadelta") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["accum_update"] = accum_update; + dict["lr"] = lr; + dict["rho"] = rho; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyAdadelta", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + public static Tensor apply_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor grad, bool? use_locking = null, bool? update_slots = null, string name = "ApplyAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (update_slots.HasValue) + dict["update_slots"] = update_slots.Value; + var op = _op_def_lib._apply_op_helper("ApplyAdagrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the proximal adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Training step number. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAdagradDA'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor apply_adagrad_d_a (Tensor var, Tensor gradient_accumulator, Tensor gradient_squared_accumulator, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor global_step, bool? 
use_locking = null, string name = "ApplyAdagradDA") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["gradient_accumulator"] = gradient_accumulator; + dict["gradient_squared_accumulator"] = gradient_squared_accumulator; + dict["grad"] = grad; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["global_step"] = global_step; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyAdagradDA", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the Adam algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAdam'. + /// + /// + /// If True, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, uses the nesterov update. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ + /// $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ + /// $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ + /// $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + /// + public static Tensor apply_adam (Tensor var, Tensor m, Tensor v, Tensor beta1_power, Tensor beta2_power, Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, bool? use_locking = null, bool? use_nesterov = null, string name = "ApplyAdam") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["v"] = v; + dict["beta1_power"] = beta1_power; + dict["beta2_power"] = beta2_power; + dict["lr"] = lr; + dict["beta1"] = beta1; + dict["beta2"] = beta2; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("ApplyAdam", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the AddSign update. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyAddSign'. + /// + /// + /// If True, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
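+ /// Intuition for the update below, with illustrative numbers:
+ ///
+ ///     // alpha = 1.0, sign_decay = 0.5
+ ///     // g and m agree in sign:    update = (1.0 + 0.5) * g = 1.5 * g
+ ///     // g and m disagree in sign: update = (1.0 - 0.5) * g = 0.5 * g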
+ /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- (alpha + sign_decay * sign(g) *sign(m)) * g + /// variable <- variable - lr_t * update + /// + public static Tensor apply_add_sign (Tensor var, Tensor m, Tensor lr, Tensor alpha, Tensor sign_decay, Tensor beta, Tensor grad, bool? use_locking = null, string name = "ApplyAddSign") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["lr"] = lr; + dict["alpha"] = alpha; + dict["sign_decay"] = sign_decay; + dict["beta"] = beta; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyAddSign", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyCenteredRMSProp'. + /// + /// + /// If True, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. + /// + /// Note that in dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in this sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// mg <- rho * mg_{t-1} + (1-rho) * grad + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + /// var <- var - mom + /// + public static Tensor apply_centered_r_m_s_prop (Tensor var, Tensor mg, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ApplyCenteredRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["mg"] = mg; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyCenteredRMSProp", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. 
+ ///
+ /// Scaling factor. Must be a scalar.
+ ///
+ /// L1 regularization. Must be a scalar.
+ ///
+ /// L2 regularization. Must be a scalar.
+ ///
+ /// Scaling factor. Must be a scalar.
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyFtrl'.
+ ///
+ ///
+ /// If True, updating of the var and accum tensors will be protected
+ /// by a lock; otherwise the behavior is undefined, but may exhibit less
+ /// contention.
+ ///
+ ///
+ /// Same as "var".
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// accum_new = accum + grad * grad
+ /// linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+ /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+ /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+ /// accum = accum_new
+ ///
+ public static Tensor apply_ftrl (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor lr_power, bool? use_locking = null, string name = "ApplyFtrl")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["var"] = var;
+ dict["accum"] = accum;
+ dict["linear"] = linear;
+ dict["grad"] = grad;
+ dict["lr"] = lr;
+ dict["l1"] = l1;
+ dict["l2"] = l2;
+ dict["lr_power"] = lr_power;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ApplyFtrl", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Update '*var' according to the Ftrl-proximal scheme.
+ ///
+ ///
+ /// Should be from a Variable().
+ ///
+ ///
+ /// Should be from a Variable().
+ ///
+ ///
+ /// Should be from a Variable().
+ ///
+ ///
+ /// The gradient.
+ ///
+ ///
+ /// Scaling factor. Must be a scalar.
+ ///
+ ///
+ /// L1 regularization. Must be a scalar.
+ ///
+ ///
+ /// L2 shrinkage regularization. Must be a scalar.
+ ///
+ ///
+ ///
+ ///
+ /// Scaling factor. Must be a scalar.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyFtrlV2'.
+ ///
+ ///
+ /// If True, updating of the var and accum tensors will be protected
+ /// by a lock; otherwise the behavior is undefined, but may exhibit less
+ /// contention.
+ ///
+ ///
+ /// Same as "var".
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+ /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+ /// linear += grad_with_shrinkage +
+ /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+ /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+ /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+ /// accum = accum_new
+ ///
+ public static Tensor apply_ftrl_v2 (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor l2_shrinkage, Tensor lr_power, bool?
use_locking = null, string name = "ApplyFtrlV2") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["l2_shrinkage"] = l2_shrinkage; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyFtrlV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' by subtracting 'alpha' * 'delta' from it. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The change. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor apply_gradient_descent (Tensor var, Tensor alpha, Tensor delta, bool? use_locking = null, string name = "ApplyGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["delta"] = delta; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyGradientDescent", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the momentum scheme. Set use_nesterov = True if you + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// Momentum. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyMomentum'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// want to use Nesterov momentum. + /// + /// accum = accum * momentum + grad + /// var -= lr * accum + /// + public static Tensor apply_momentum (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor momentum, bool? use_locking = null, bool? use_nesterov = null, string name = "ApplyMomentum") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["momentum"] = momentum; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("ApplyMomentum", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the AddSign update. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. 
+ /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyPowerSign'. + /// + /// + /// If True, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + /// variable <- variable - lr_t * update + /// + public static Tensor apply_power_sign (Tensor var, Tensor m, Tensor lr, Tensor logbase, Tensor sign_decay, Tensor beta, Tensor grad, bool? use_locking = null, string name = "ApplyPowerSign") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["lr"] = lr; + dict["logbase"] = logbase; + dict["sign_decay"] = sign_decay; + dict["beta"] = beta; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyPowerSign", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyProximalAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// accum += grad * grad + /// prox_v = var - lr * grad * (1 / sqrt(accum)) + /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + /// + public static Tensor apply_proximal_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor l1, Tensor l2, Tensor grad, bool? use_locking = null, string name = "ApplyProximalAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyProximalAdagrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The change. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyProximalGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
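+ /// A worked instance of the update below (illustrative values):
+ ///
+ ///     // var = 1.0, alpha = 0.1, delta = 2.0, l1 = 0.5, l2 = 0.0
+ ///     // prox_v = 1.0 - 0.1 * 2.0 = 0.8
+ ///     // var = sign(0.8)/(1 + 0.1*0.0) * max(|0.8| - 0.1*0.5, 0) = 0.75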
+ /// + /// + /// prox_v = var - alpha * delta + /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + /// + public static Tensor apply_proximal_gradient_descent (Tensor var, Tensor alpha, Tensor l1, Tensor l2, Tensor delta, bool? use_locking = null, string name = "ApplyProximalGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["l1"] = l1; + dict["l2"] = l2; + dict["delta"] = delta; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyProximalGradientDescent", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApplyRMSProp'. + /// + /// + /// If True, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + public static Tensor apply_r_m_s_prop (Tensor var, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ApplyRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ApplyRMSProp", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of abs(x-y) < tolerance element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ApproximateEqual'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor approximate_equal (Tensor x, Tensor y, float? tolerance = null, string name = "ApproximateEqual") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + if (tolerance.HasValue) + dict["tolerance"] = tolerance.Value; + var op = _op_def_lib._apply_op_helper("ApproximateEqual", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the index with the largest value across dimensions of a tensor. + /// + /// + /// + /// + /// int32 or int64, must be in the range [-rank(input), rank(input)). 
+ /// Describes which dimension of the input Tensor to reduce across. For vectors, + /// use dimension = 0. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ArgMax'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + public static Tensor arg_max (Tensor input, Tensor dimension, TF_DataType? output_type = null, string name = "ArgMax") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["dimension"] = dimension; + if (output_type.HasValue) + dict["output_type"] = output_type.Value; + var op = _op_def_lib._apply_op_helper("ArgMax", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the index with the smallest value across dimensions of a tensor. + /// + /// + /// + /// + /// int32 or int64, must be in the range [-rank(input), rank(input)). + /// Describes which dimension of the input Tensor to reduce across. For vectors, + /// use dimension = 0. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ArgMin'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + public static Tensor arg_min (Tensor input, Tensor dimension, TF_DataType? output_type = null, string name = "ArgMin") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["dimension"] = dimension; + if (output_type.HasValue) + dict["output_type"] = output_type.Value; + var op = _op_def_lib._apply_op_helper("ArgMin", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts each entry in the given tensor to strings. Supports many numeric + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AsString'. + /// + /// + /// The post-decimal precision to use for floating point numbers. + /// Only used if precision > -1. + /// + /// + /// Use scientific notation for floating point numbers. + /// + /// + /// Use shortest representation (either scientific or standard) for + /// floating point numbers. + /// + /// + /// Pad pre-decimal numbers to this width. + /// Applies to both floating point and integer numbers. + /// Only used if width > -1. + /// + /// + /// The value to pad if width > -1. If empty, pads with spaces. + /// Another typical value is '0'. String cannot be longer than 1 character. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// types and boolean. + /// + public static Tensor as_string (Tensor input, int? precision = null, bool? scientific = null, bool? shortest = null, int? 
width = null, string fill = null, string name = "AsString") + { + var dict = new Dictionary(); + dict["input"] = input; + if (precision.HasValue) + dict["precision"] = precision.Value; + if (scientific.HasValue) + dict["scientific"] = scientific.Value; + if (shortest.HasValue) + dict["shortest"] = shortest.Value; + if (width.HasValue) + dict["width"] = width.Value; + if (fill != null) + dict["fill"] = fill; + var op = _op_def_lib._apply_op_helper("AsString", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes asin of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Asin'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor asin (Tensor x, string name = "Asin") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Asin", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes inverse hyperbolic sine of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Asinh'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor asinh (Tensor x, string name = "Asinh") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Asinh", name: name, keywords: dict); + return op.output; + } + + /// + /// Asserts that the given condition is true. + /// + /// + /// The condition to evaluate. + /// + /// + /// The tensors to print out when condition is false. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Assert'. + /// + /// + /// Print this many entries of each tensor. + /// + /// + /// Returns the description of the operation + /// + /// + /// If condition evaluates to false, print the list of tensors in data. + /// summarize determines how many entries of the tensors to print. + /// + public static Operation assert (Tensor condition, Tensor[] data, int? summarize = null, string name = "Assert") + { + var dict = new Dictionary(); + dict["condition"] = condition; + dict["data"] = data; + if (summarize.HasValue) + dict["summarize"] = summarize.Value; + var op = _op_def_lib._apply_op_helper("Assert", name: name, keywords: dict); + return op; + } + + /// + /// Update 'ref' by assigning 'value' to it. + /// + /// + /// Should be from a Variable node. May be uninitialized. + /// + /// + /// The value to be assigned to the variable. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Assign'. + /// + /// + /// If true, the operation will validate that the shape + /// of 'value' matches the shape of the Tensor being assigned to. If false, + /// 'ref' will take on the shape of 'value'. + /// + /// + /// If True, the assignment will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as "ref". Returned as a convenience for operations that want + /// to use the new value after the variable has been reset. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation outputs "ref" after the assignment is done. 
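+ /// As a usage sketch (the variable setup and value names are illustrative
+ /// only, and the add wrapper is assumed to be the one generated elsewhere in
+ /// this file):
+ /// 
+ /// var assigned = gen_ops.assign(v, new_value);   // writes new_value into 'v'
+ /// var doubled = gen_ops.add(assigned, assigned); // guaranteed to see the assigned value
+ /// 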
+ /// This makes it easier to chain operations that need to use the reset value.
+ /// 
+ public static Tensor assign (Tensor reference, Tensor value, bool? validate_shape = null, bool? use_locking = null, string name = "Assign")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["value"] = value;
+ if (validate_shape.HasValue)
+ dict["validate_shape"] = validate_shape.Value;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("Assign", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Update 'ref' by adding 'value' to it.
+ /// 
+ /// 
+ /// Should be from a Variable node.
+ /// 
+ /// 
+ /// The value to be added to the variable.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AssignAdd'.
+ /// 
+ /// 
+ /// If True, the addition will be protected by a lock;
+ /// otherwise the behavior is undefined, but may exhibit less contention.
+ /// 
+ /// 
+ /// = Same as "ref". Returned as a convenience for operations that want
+ /// to use the new value after the variable has been updated.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This operation outputs "ref" after the update is done.
+ /// This makes it easier to chain operations that need to use the reset value.
+ /// 
+ public static Tensor assign_add (Tensor reference, Tensor value, bool? use_locking = null, string name = "AssignAdd")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["value"] = value;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("AssignAdd", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Adds a value to the current value of a variable.
+ /// 
+ /// 
+ /// handle to the resource in which to store the variable.
+ /// 
+ /// 
+ /// the value by which the variable will be incremented.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AssignAddVariableOp'.
+ /// 
+ /// 
+ /// Returns the description of the operation
+ /// 
+ /// 
+ /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+ /// see the incremented value or a subsequent newer one.
+ /// 
+ public static Operation assign_add_variable_op (Tensor resource, Tensor value, string name = "AssignAddVariableOp")
+ {
+ var dict = new Dictionary();
+ dict["resource"] = resource;
+ dict["value"] = value;
+ var op = _op_def_lib._apply_op_helper("AssignAddVariableOp", name: name, keywords: dict);
+ return op;
+ }
+ 
+ /// 
+ /// Update 'ref' by subtracting 'value' from it.
+ /// 
+ /// 
+ /// Should be from a Variable node.
+ /// 
+ /// 
+ /// The value to be subtracted from the variable.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AssignSub'.
+ /// 
+ /// 
+ /// If True, the subtraction will be protected by a lock;
+ /// otherwise the behavior is undefined, but may exhibit less contention.
+ /// 
+ /// 
+ /// = Same as "ref". Returned as a convenience for operations that want
+ /// to use the new value after the variable has been updated.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This operation outputs "ref" after the update is done.
+ /// This makes it easier to chain operations that need to use the reset value.
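+ /// 
+ /// In pseudo-code (illustrative only):
+ /// 
+ /// ref = ref - value
+ /// return ref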
+ /// 
+ public static Tensor assign_sub (Tensor reference, Tensor value, bool? use_locking = null, string name = "AssignSub")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["value"] = value;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("AssignSub", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Subtracts a value from the current value of a variable.
+ /// 
+ /// 
+ /// handle to the resource in which to store the variable.
+ /// 
+ /// 
+ /// the value by which the variable will be decremented.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AssignSubVariableOp'.
+ /// 
+ /// 
+ /// Returns the description of the operation
+ /// 
+ /// 
+ /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+ /// see the decremented value or a subsequent newer one.
+ /// 
+ public static Operation assign_sub_variable_op (Tensor resource, Tensor value, string name = "AssignSubVariableOp")
+ {
+ var dict = new Dictionary();
+ dict["resource"] = resource;
+ dict["value"] = value;
+ var op = _op_def_lib._apply_op_helper("AssignSubVariableOp", name: name, keywords: dict);
+ return op;
+ }
+ 
+ /// 
+ /// Assigns a new value to a variable.
+ /// 
+ /// 
+ /// handle to the resource in which to store the variable.
+ /// 
+ /// 
+ /// the value to set the new tensor to use.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AssignVariableOp'.
+ /// 
+ /// 
+ /// Returns the description of the operation
+ /// 
+ /// 
+ /// Any ReadVariableOp with a control dependency on this op is guaranteed to return
+ /// this value or a subsequent newer value of the variable.
+ /// 
+ public static Operation assign_variable_op (Tensor resource, Tensor value, string name = "AssignVariableOp")
+ {
+ var dict = new Dictionary();
+ dict["resource"] = resource;
+ dict["value"] = value;
+ var op = _op_def_lib._apply_op_helper("AssignVariableOp", name: name, keywords: dict);
+ return op;
+ }
+ 
+ /// 
+ /// Computes atan of x element-wise.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Atan'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ public static Tensor atan (Tensor x, string name = "Atan")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Atan", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes arctangent of y/x element-wise, respecting signs of the arguments.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Atan2'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This is the angle \( \theta \in [-\pi, \pi] \) such that
+ /// \[ x = r \cos(\theta) \]
+ /// and
+ /// \[ y = r \sin(\theta) \]
+ /// where \( r = \sqrt{x^2 + y^2} \).
+ /// 
+ public static Tensor atan2 (Tensor y, Tensor x, string name = "Atan2")
+ {
+ var dict = new Dictionary();
+ dict["y"] = y;
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Atan2", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes inverse hyperbolic tangent of x element-wise.
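+ /// Equivalently, as a mathematical identity (not additional op behavior):
+ /// 
+ /// atanh(x) = 0.5 * log((1 + x) / (1 - x)),   for x in (-1, 1)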
+ /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Atanh'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor atanh (Tensor x, string name = "Atanh") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Atanh", name: name, keywords: dict); + return op.output; + } + + /// + /// Produces a visualization of audio data over time. + /// + /// + /// Float representation of audio data. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AudioSpectrogram'. + /// + /// + /// Optional argument + /// How wide the input window is in samples. For the highest efficiency + /// this should be a power of two, but other values are accepted. + /// + /// + /// Optional argument + /// How widely apart the center of adjacent sample windows should be. + /// + /// + /// Whether to return the squared magnitude or just the + /// magnitude. Using squared magnitude can avoid extra calculations. + /// + /// + /// 3D representation of the audio frequencies as an image. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Spectrograms are a standard way of representing audio information as a series of + /// slices of frequency information, one slice for each window of time. By joining + /// these together into a sequence, they form a distinctive fingerprint of the sound + /// over time. + /// + /// This op expects to receive audio data as an input, stored as floats in the range + /// -1 to 1, together with a window width in samples, and a stride specifying how + /// far to move the window between slices. From this it generates a three + /// dimensional output. The lowest dimension has an amplitude value for each + /// frequency during that time slice. The next dimension is time, with successive + /// frequency slices. The final dimension is for the channels in the input, so a + /// stereo audio input would have two here for example. + /// + /// This means the layout when converted and saved as an image is rotated 90 degrees + /// clockwise from a typical spectrogram. Time is descending down the Y axis, and + /// the frequency decreases from left to right. + /// + /// Each value in the result represents the square root of the sum of the real and + /// imaginary parts of an FFT on the current window of samples. In this way, the + /// lowest dimension represents the power of each frequency in the current window, + /// and adjacent windows are concatenated in the next dimension. + /// + /// To get a more intuitive and visual look at what this operation does, you can run + /// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the + /// resulting spectrogram as a PNG image. + /// + public static Tensor audio_spectrogram (Tensor input, int window_size, int stride, bool? magnitude_squared = null, string name = "AudioSpectrogram") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["window_size"] = window_size; + dict["stride"] = stride; + if (magnitude_squared.HasValue) + dict["magnitude_squared"] = magnitude_squared.Value; + var op = _op_def_lib._apply_op_helper("AudioSpectrogram", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with audio. + /// + /// + /// Scalar. 
Used to build the tag attribute of the summary values. + /// + /// + /// 2-D of shape [batch_size, frames]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AudioSummary'. + /// + /// + /// Optional argument + /// The sample rate of the signal in hertz. + /// + /// + /// Max number of batch elements to generate audio for. + /// + /// + /// Scalar. Serialized Summary protocol buffer. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The summary has up to max_outputs summary values containing audio. The + /// audio is built from tensor which must be 3-D with shape [batch_size, + /// frames, channels] or 2-D with shape [batch_size, frames]. The values are + /// assumed to be in the range of [-1.0, 1.0] with a sample rate of sample_rate. + /// + /// The tag argument is a scalar Tensor of type string. It is used to + /// build the tag of the summary values: + /// + /// * If max_outputs is 1, the summary value tag is '*tag*/audio'. + /// * If max_outputs is greater than 1, the summary value tags are + /// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. + /// + public static Tensor audio_summary (Tensor tag, Tensor tensor, float sample_rate, int? max_outputs = null, string name = "AudioSummary") + { + var dict = new Dictionary(); + dict["tag"] = tag; + dict["tensor"] = tensor; + dict["sample_rate"] = sample_rate; + if (max_outputs.HasValue) + dict["max_outputs"] = max_outputs.Value; + var op = _op_def_lib._apply_op_helper("AudioSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with audio. + /// + /// + /// Scalar. Used to build the tag attribute of the summary values. + /// + /// + /// 2-D of shape [batch_size, frames]. + /// + /// + /// The sample rate of the signal in hertz. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AudioSummaryV2'. + /// + /// + /// Max number of batch elements to generate audio for. + /// + /// + /// Scalar. Serialized Summary protocol buffer. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The summary has up to max_outputs summary values containing audio. The + /// audio is built from tensor which must be 3-D with shape [batch_size, + /// frames, channels] or 2-D with shape [batch_size, frames]. The values are + /// assumed to be in the range of [-1.0, 1.0] with a sample rate of sample_rate. + /// + /// The tag argument is a scalar Tensor of type string. It is used to + /// build the tag of the summary values: + /// + /// * If max_outputs is 1, the summary value tag is '*tag*/audio'. + /// * If max_outputs is greater than 1, the summary value tags are + /// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. + /// + public static Tensor audio_summary_v2 (Tensor tag, Tensor tensor, Tensor sample_rate, int? max_outputs = null, string name = "AudioSummaryV2") + { + var dict = new Dictionary(); + dict["tag"] = tag; + dict["tensor"] = tensor; + dict["sample_rate"] = sample_rate; + if (max_outputs.HasValue) + dict["max_outputs"] = max_outputs.Value; + var op = _op_def_lib._apply_op_helper("AudioSummaryV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs average pooling on the input. + /// + /// + /// 4-D with shape [batch, height, width, channels]. 
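+ /// As an illustrative sketch of the window arithmetic (assuming "VALID"
+ /// padding), each spatial output size is ceil((in_size - ksize + 1) / stride),
+ /// so for example:
+ /// 
+ /// value:  [1, 4, 4, 1], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1]
+ /// output: [1, 2, 2, 1]   // each entry is the mean of a 2x2 window
+ /// 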
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AvgPool'. + /// + /// + /// Optional argument + /// The size of the sliding window for each dimension of value. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of value. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// The average pooled output tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Each entry in output is the mean of the corresponding size ksize + /// window in value. + /// + public static Tensor avg_pool (Tensor value, int[] ksize, int[] strides, string padding, string data_format = null, string name = "AvgPool") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("AvgPool", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs 3D average pooling on the input. + /// + /// + /// Shape [batch, depth, rows, cols, channels] tensor to pool over. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AvgPool3D'. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have ksize[0] = ksize[4] = 1. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of input. Must have strides[0] = strides[4] = 1. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// The average pooled output tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor avg_pool3d (Tensor input, int[] ksize, int[] strides, string padding, string data_format = null, string name = "AvgPool3D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("AvgPool3D", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of average pooling function. + /// + /// + /// The original input dimensions. + /// + /// + /// Output backprop of shape [batch, depth, rows, cols, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AvgPool3DGrad'. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. 
The size of the window for each dimension of + /// the input tensor. Must have ksize[0] = ksize[4] = 1. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of input. Must have strides[0] = strides[4] = 1. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// The backprop for input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor avg_pool3d_grad (Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "AvgPool3DGrad") + { + var dict = new Dictionary(); + dict["orig_input_shape"] = orig_input_shape; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("AvgPool3DGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of the average pooling function. + /// + /// + /// 1-D. Shape of the original input to avg_pool. + /// + /// + /// 4-D with shape [batch, height, width, channels]. Gradients w.r.t. + /// the output of avg_pool. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'AvgPoolGrad'. + /// + /// + /// Optional argument + /// The size of the sliding window for each dimension of the input. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the input. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// 4-D. Gradients w.r.t. the input of avg_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor avg_pool_grad (Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "AvgPoolGrad") + { + var dict = new Dictionary(); + dict["orig_input_shape"] = orig_input_shape; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("AvgPoolGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Defines a barrier that persists across different graph executions. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Barrier'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. Each shape must be 1 in the + /// first dimension. 
The length of this attr must be the same as the length of + /// component_types. + /// + /// + /// The capacity of the barrier. The default capacity is MAX_INT32, + /// which is the largest capacity of the underlying queue. + /// + /// + /// If non-empty, this barrier is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this barrier will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the barrier. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// A barrier represents a key-value map, where each key is a string, and + /// each value is a tuple of tensors. + /// + /// At runtime, the barrier contains 'complete' and 'incomplete' + /// elements. A complete element has defined tensors for all components of + /// its value tuple, and may be accessed using BarrierTakeMany. An + /// incomplete element has some undefined components in its value tuple, + /// and may be updated using BarrierInsertMany. + /// + public static Tensor barrier (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "Barrier") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("Barrier", name: name, keywords: dict); + return op.output; + } + + /// + /// Closes the given barrier. + /// + /// + /// The handle to a barrier. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BarrierClose'. + /// + /// + /// If true, all pending enqueue requests that are + /// blocked on the barrier's queue will be canceled. InsertMany will fail, even + /// if no new key is introduced. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation signals that no more new elements will be inserted in the + /// given barrier. Subsequent InsertMany that try to introduce a new key will fail. + /// Subsequent InsertMany operations that just add missing components to already + /// existing elements will continue to succeed. Subsequent TakeMany operations will + /// continue to succeed if sufficient completed elements remain in the barrier. + /// Subsequent TakeMany operations that would block will fail immediately. + /// + public static Operation barrier_close (Tensor handle, bool? cancel_pending_enqueues = null, string name = "BarrierClose") + { + var dict = new Dictionary(); + dict["handle"] = handle; + if (cancel_pending_enqueues.HasValue) + dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value; + var op = _op_def_lib._apply_op_helper("BarrierClose", name: name, keywords: dict); + return op; + } + + /// + /// Computes the number of incomplete elements in the given barrier. + /// + /// + /// The handle to a barrier. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BarrierIncompleteSize'. + /// + /// + /// The number of incomplete elements (i.e. those with some of their value + /// components not set) in the barrier. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor barrier_incomplete_size (Tensor handle, string name = "BarrierIncompleteSize") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("BarrierIncompleteSize", name: name, keywords: dict); + return op.output; + } + + /// + /// For each key, assigns the respective value to the specified component. + /// + /// + /// The handle to a barrier. + /// + /// + /// A one-dimensional tensor of keys, with length n. + /// + /// + /// An any-dimensional tensor of values, which are associated with the + /// respective keys. The 0th dimension must have length n. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BarrierInsertMany'. + /// + /// + /// Optional argument + /// The component of the barrier elements that is being assigned. + /// + /// + /// Returns the description of the operation + /// + /// + /// If a key is not found in the barrier, this operation will create a new + /// incomplete element. If a key is found in the barrier, and the element + /// already has a value at component_index, this operation will fail with + /// INVALID_ARGUMENT, and leave the barrier in an undefined state. + /// + public static Operation barrier_insert_many (Tensor handle, Tensor keys, Tensor values, int component_index, string name = "BarrierInsertMany") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["keys"] = keys; + dict["values"] = values; + dict["component_index"] = component_index; + var op = _op_def_lib._apply_op_helper("BarrierInsertMany", name: name, keywords: dict); + return op; + } + + /// + /// Computes the number of complete elements in the given barrier. + /// + /// + /// The handle to a barrier. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BarrierReadySize'. + /// + /// + /// The number of complete elements (i.e. those with all of their value + /// components set) in the barrier. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor barrier_ready_size (Tensor handle, string name = "BarrierReadySize") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("BarrierReadySize", name: name, keywords: dict); + return op.output; + } + + /// + /// Takes the given number of completed elements from a barrier. + /// + /// + /// The handle to a barrier. + /// + /// + /// A single-element tensor containing the number of elements to + /// take. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BarrierTakeMany'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// Allow to return less than num_elements items if barrier is + /// already closed. + /// + /// + /// + /// + /// If the queue is empty, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// indices : A one-dimensional tensor of indices, with length num_elems. + /// These indices refer to the batch in which the values were placed into the + /// barrier (starting with MIN_LONG and increasing with each BarrierInsertMany). 
+ /// keys : A one-dimensional tensor of keys, with length num_elements.
+ /// values : One any-dimensional tensor per component in a barrier element. All
+ /// values have length num_elements in the 0th dimension.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This operation concatenates completed-element component tensors along
+ /// the 0th dimension to make a single component tensor.
+ /// 
+ /// Elements come out of the barrier when they are complete, and in the order
+ /// in which they were placed into the barrier. The indices output provides
+ /// information about the batch in which each element was originally inserted
+ /// into the barrier.
+ /// 
+ public static (Tensor indices, Tensor keys, Tensor[] values) barrier_take_many (Tensor handle, Tensor num_elements, TF_DataType[] component_types, bool? allow_small_batch = null, bool? wait_for_incomplete = null, int? timeout_ms = null, string name = "BarrierTakeMany")
+ {
+ var dict = new Dictionary();
+ dict["handle"] = handle;
+ dict["num_elements"] = num_elements;
+ dict["component_types"] = component_types;
+ if (allow_small_batch.HasValue)
+ dict["allow_small_batch"] = allow_small_batch.Value;
+ if (wait_for_incomplete.HasValue)
+ dict["wait_for_incomplete"] = wait_for_incomplete.Value;
+ if (timeout_ms.HasValue)
+ dict["timeout_ms"] = timeout_ms.Value;
+ var op = _op_def_lib._apply_op_helper("BarrierTakeMany", name: name, keywords: dict);
+ int _idx = 0;
+ var indices = op.outputs[_idx++];
+ var keys = op.outputs[_idx++];
+ var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
+ return (indices, keys, values);
+ }
+ 
+ /// 
+ /// Batches all input tensors nondeterministically.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Batch'.
+ /// 
+ /// 
+ /// Optional argument
+ /// 
+ /// Optional argument
+ /// 
+ /// Optional argument
+ /// 
+ /// Optional argument
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// batched_tensors :
+ /// batch_index :
+ /// id :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// When many instances of this Op are being run concurrently with the same
+ /// container/shared_name in the same device, some will output zero-shaped Tensors
+ /// and others will output Tensors of size up to max_batch_size.
+ /// 
+ /// All Tensors in in_tensors are batched together (so, for example, labels and
+ /// features should be batched with a single instance of this operation).
+ /// 
+ /// Each invocation of batch emits an id scalar which will be used to identify
+ /// this particular invocation when doing unbatch or its gradient.
+ /// 
+ /// Each op which emits a non-empty batch will also emit a non-empty batch_index
+ /// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
+ /// start, and length of elements of each set of Tensors present in batched_tensors.
+ /// 
+ /// Batched tensors are concatenated along the first dimension, and all tensors in
+ /// in_tensors must have the first dimension of the same size.
+ /// 
+ /// in_tensors: The tensors to be batched.
+ /// num_batch_threads: Number of scheduling threads for processing batches of work.
+ /// Determines the number of batches processed in parallel. + /// max_batch_size: Batch sizes will never be bigger than this. + /// batch_timeout_micros: Maximum number of microseconds to wait before outputting + /// an incomplete batch. + /// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does + /// nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + /// batches up to one of those sizes. The entries must increase monotonically, and + /// the final entry must equal max_batch_size. + /// grad_timeout_micros: The timeout to use for the gradient. See Unbatch. + /// batched_tensors: Either empty tensors or a batch of concatenated Tensors. + /// batch_index: If out_tensors is non-empty, has information to invert it. + /// container: Controls the scope of sharing of this batch. + /// id: always contains a scalar with a unique ID for this invocation of Batch. + /// shared_name: Concurrently running instances of batch in the same device with the + /// same container and shared_name will batch their elements together. If left + /// empty, the op name will be used as the shared name. + /// T: the types of tensors to be batched. + /// + public static (Tensor[] batched_tensors, Tensor batch_index, Tensor id) batch (Tensor[] in_tensors, int num_batch_threads, int max_batch_size, int batch_timeout_micros, int grad_timeout_micros, int? max_enqueued_batches = null, int[] allowed_batch_sizes = null, string container = null, string shared_name = null, string batching_queue = null, string name = "Batch") + { + var dict = new Dictionary(); + dict["in_tensors"] = in_tensors; + dict["num_batch_threads"] = num_batch_threads; + dict["max_batch_size"] = max_batch_size; + dict["batch_timeout_micros"] = batch_timeout_micros; + dict["grad_timeout_micros"] = grad_timeout_micros; + if (max_enqueued_batches.HasValue) + dict["max_enqueued_batches"] = max_enqueued_batches.Value; + if (allowed_batch_sizes != null) + dict["allowed_batch_sizes"] = allowed_batch_sizes; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (batching_queue != null) + dict["batching_queue"] = batching_queue; + var op = _op_def_lib._apply_op_helper("Batch", name: name, keywords: dict); + int _idx = 0; + var batched_tensors = Enumerable.Range(0, op.OutputListLength("batched_tensors")).Select(_ => op.outputs[_idx++]).ToArray(); + var batch_index = op.outputs[_idx++]; + var id = op.outputs[_idx++]; + return (batched_tensors, batch_index, id); + } + + /// + /// Creates a dataset that batches batch_size elements from input_dataset. + /// + /// + /// + /// + /// A scalar representing the number of elements to accumulate in a + /// batch. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
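+ /// For example (illustrative): a dataset of 7 elements batched with
+ /// batch_size = 3 yields batches of sizes 3, 3 and 1; the final, smaller
+ /// batch is emitted as-is (see BatchDatasetV2 below for drop_remainder).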
+ /// + public static Tensor batch_dataset (Tensor input_dataset, Tensor batch_size, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BatchDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["batch_size"] = batch_size; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("BatchDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that batches batch_size elements from input_dataset. + /// + /// + /// + /// + /// A scalar representing the number of elements to accumulate in a batch. + /// + /// + /// A scalar representing whether the last batch should be dropped in case its size + /// is smaller than desired. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchDatasetV2'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor batch_dataset_v2 (Tensor input_dataset, Tensor batch_size, Tensor drop_remainder, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BatchDatasetV2") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["batch_size"] = batch_size; + dict["drop_remainder"] = drop_remainder; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("BatchDatasetV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// 2-D or higher with shape [..., r_x, c_x]. + /// + /// + /// 2-D or higher with shape [..., r_y, c_y]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchMatMul'. + /// + /// + /// If True, adjoint the slices of x. Defaults to False. + /// + /// + /// If True, adjoint the slices of y. Defaults to False. + /// + /// + /// 3-D or higher with shape [..., r_o, c_o] + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Multiplies all slices of Tensor x and y (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the adj_x or adj_y flag to True, which are by default False. + /// + /// The input tensors x and y are 2-D or higher with shape [..., r_x, c_x] + /// and [..., r_y, c_y]. + /// + /// The output tensor is 2-D or higher with shape [..., r_o, c_o], where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + public static Tensor batch_mat_mul (Tensor x, Tensor y, bool? adj_x = null, bool? adj_y = null, string name = "BatchMatMul") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + if (adj_x.HasValue) + dict["adj_x"] = adj_x.Value; + if (adj_y.HasValue) + dict["adj_y"] = adj_y.Value; + var op = _op_def_lib._apply_op_helper("BatchMatMul", name: name, keywords: dict); + return op.output; + } + + /// + /// Batch normalization. 
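+ /// In pseudo-code the forward computation is (a sketch of the standard
+ /// global-normalization formula; parameter meanings are described below):
+ /// 
+ /// normalized = (t - m) / sqrt(v + variance_epsilon)
+ /// result = normalized * gamma + beta   // if scale_after_normalization
+ /// result = normalized + beta           // otherwise
+ /// 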
+ /// 
+ /// 
+ /// A 4D input Tensor.
+ /// 
+ /// 
+ /// A 1D mean Tensor with size matching the last dimension of t.
+ /// This is the first output from tf.nn.moments,
+ /// or a saved moving average thereof.
+ /// 
+ /// 
+ /// A 1D variance Tensor with size matching the last dimension of t.
+ /// This is the second output from tf.nn.moments,
+ /// or a saved moving average thereof.
+ /// 
+ /// 
+ /// A 1D beta Tensor with size matching the last dimension of t.
+ /// An offset to be added to the normalized tensor.
+ /// 
+ /// 
+ /// A 1D gamma Tensor with size matching the last dimension of t.
+ /// If "scale_after_normalization" is true, this tensor will be multiplied
+ /// with the normalized tensor.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchNormWithGlobalNormalization'.
+ /// 
+ /// 
+ /// Optional argument
+ /// A small float number to avoid dividing by 0.
+ /// 
+ /// 
+ /// Optional argument
+ /// A bool indicating whether the resulting tensor
+ /// needs to be multiplied with gamma.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This op is deprecated. Prefer tf.nn.batch_normalization.
+ /// 
+ public static Tensor batch_norm_with_global_normalization (Tensor t, Tensor m, Tensor v, Tensor beta, Tensor gamma, float variance_epsilon, bool scale_after_normalization, string name = "BatchNormWithGlobalNormalization")
+ {
+ var dict = new Dictionary();
+ dict["t"] = t;
+ dict["m"] = m;
+ dict["v"] = v;
+ dict["beta"] = beta;
+ dict["gamma"] = gamma;
+ dict["variance_epsilon"] = variance_epsilon;
+ dict["scale_after_normalization"] = scale_after_normalization;
+ var op = _op_def_lib._apply_op_helper("BatchNormWithGlobalNormalization", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Gradients for batch normalization.
+ /// 
+ /// 
+ /// A 4D input Tensor.
+ /// 
+ /// 
+ /// A 1D mean Tensor with size matching the last dimension of t.
+ /// This is the first output from tf.nn.moments,
+ /// or a saved moving average thereof.
+ /// 
+ /// 
+ /// A 1D variance Tensor with size matching the last dimension of t.
+ /// This is the second output from tf.nn.moments,
+ /// or a saved moving average thereof.
+ /// 
+ /// 
+ /// A 1D gamma Tensor with size matching the last dimension of t.
+ /// If "scale_after_normalization" is true, this Tensor will be multiplied
+ /// with the normalized Tensor.
+ /// 
+ /// 
+ /// 4D backprop Tensor.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchNormWithGlobalNormalizationGrad'.
+ /// 
+ /// 
+ /// Optional argument
+ /// A small float number to avoid dividing by 0.
+ /// 
+ /// 
+ /// Optional argument
+ /// A bool indicating whether the resulting tensor
+ /// needs to be multiplied with gamma.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// dx : 4D backprop tensor for input.
+ /// dm : 1D backprop tensor for mean.
+ /// dv : 1D backprop tensor for variance.
+ /// db : 1D backprop tensor for beta.
+ /// dg : 1D backprop tensor for gamma.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This op is deprecated. See tf.nn.batch_normalization.
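+ /// 
+ /// As a sketch of two of the returned gradients (standard results, assuming
+ /// scale_after_normalization is true; the reduction runs over the batch and
+ /// spatial axes):
+ /// 
+ /// db = sum(backprop, axes = [0, 1, 2])
+ /// dg = sum(backprop * (t - m) / sqrt(v + variance_epsilon), axes = [0, 1, 2])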
+ /// + public static (Tensor dx, Tensor dm, Tensor dv, Tensor db, Tensor dg) batch_norm_with_global_normalization_grad (Tensor t, Tensor m, Tensor v, Tensor gamma, Tensor backprop, float variance_epsilon, bool scale_after_normalization, string name = "BatchNormWithGlobalNormalizationGrad") + { + var dict = new Dictionary(); + dict["t"] = t; + dict["m"] = m; + dict["v"] = v; + dict["gamma"] = gamma; + dict["backprop"] = backprop; + dict["variance_epsilon"] = variance_epsilon; + dict["scale_after_normalization"] = scale_after_normalization; + var op = _op_def_lib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name: name, keywords: dict); + int _idx = 0; + var dx = op.outputs[_idx++]; + var dm = op.outputs[_idx++]; + var dv = op.outputs[_idx++]; + var db = op.outputs[_idx++]; + var dg = op.outputs[_idx++]; + return (dx, dm, dv, db, dg); + } + + /// + /// BatchToSpace for 4-D tensors of type T. + /// + /// + /// 4-D tensor with shape + /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth]. Note that the batch size of the input tensor must be divisible by + /// block_size * block_size. + /// + /// + /// 2-D tensor of non-negative integers with shape [2, 2]. It specifies + /// how many elements to crop from the intermediate result across the spatial + /// dimensions as follows: + /// + /// crops = [[crop_top, crop_bottom], [crop_left, crop_right]] + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchToSpace'. + /// + /// + /// Optional argument + /// + /// + /// 4-D with shape [batch, height, width, depth], where: + /// + /// height = height_pad - crop_top - crop_bottom + /// width = width_pad - crop_left - crop_right + /// + /// The attr block_size must be greater than one. It indicates the block size. + /// + /// Some examples: + /// + /// (1) For the following input of shape [4, 1, 1, 1] and block_size of 2: + /// + /// + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// + /// + /// The output tensor has shape [1, 2, 2, 1] and value: + /// + /// + /// x = [[[[1], [2]], [[3], [4]]]] + /// + /// + /// (2) For the following input of shape [4, 1, 1, 3] and block_size of 2: + /// + /// + /// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + /// + /// + /// The output tensor has shape [1, 2, 2, 3] and value: + /// + /// + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// (3) For the following input of shape [4, 2, 2, 1] and block_size of 2: + /// + /// + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// + /// + /// The output tensor has shape [1, 4, 4, 1] and value: + /// + /// + /// x = [[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]] + /// + /// + /// (4) For the following input of shape [8, 1, 2, 1] and block_size of 2: + /// + /// + /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + /// + /// + /// The output tensor has shape [2, 2, 4, 1] and value: + /// + /// + /// x = [[[[1], [3]], [[5], [7]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is a legacy version of the more general BatchToSpaceND. 
+ /// + /// Rearranges (permutes) data from batch into blocks of spatial data, followed by + /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, + /// this op outputs a copy of the input tensor where values from the batch + /// dimension are moved in spatial blocks to the height and width dimensions, + /// followed by cropping along the height and width dimensions. + /// + public static Tensor batch_to_space (Tensor input, Tensor crops, int block_size, string name = "BatchToSpace") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["crops"] = crops; + dict["block_size"] = block_size; + var op = _op_def_lib._apply_op_helper("BatchToSpace", name: name, keywords: dict); + return op.output; + } + + /// + /// BatchToSpace for N-D tensors of type T. + /// + /// + /// N-D with shape input_shape = [batch] + spatial_shape + remaining_shape, + /// where spatial_shape has M dimensions. + /// + /// + /// 1-D with shape [M], all values must be >= 1. + /// + /// + /// 2-D with shape [M, 2], all values must be >= 0. + /// crops[i] = [crop_start, crop_end] specifies the amount to crop from input + /// dimension i + 1, which corresponds to spatial dimension i. It is + /// required that + /// crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]. + /// + /// This operation is equivalent to the following steps: + /// + /// 1. Reshape input to reshaped of shape: + /// [block_shape[0], ..., block_shape[M-1], + /// batch / prod(block_shape), + /// input_shape[1], ..., input_shape[N-1]] + /// + /// 2. Permute dimensions of reshaped to produce permuted of shape + /// [batch / prod(block_shape), + /// + /// input_shape[1], block_shape[0], + /// ..., + /// input_shape[M], block_shape[M-1], + /// + /// input_shape[M+1], ..., input_shape[N-1]] + /// + /// 3. Reshape permuted to produce reshaped_permuted of shape + /// [batch / prod(block_shape), + /// + /// input_shape[1] * block_shape[0], + /// ..., + /// input_shape[M] * block_shape[M-1], + /// + /// input_shape[M+1], + /// ..., + /// input_shape[N-1]] + /// + /// 4. 
Crop the start and end of dimensions [1, ..., M] of + /// reshaped_permuted according to crops to produce the output of shape: + /// [batch / prod(block_shape), + /// + /// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + /// ..., + /// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + /// + /// input_shape[M+1], ..., input_shape[N-1]] + /// + /// Some examples: + /// + /// (1) For the following input of shape [4, 1, 1, 1], block_shape = [2, 2], and + /// crops = [[0, 0], [0, 0]]: + /// + /// + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// + /// + /// The output tensor has shape [1, 2, 2, 1] and value: + /// + /// + /// x = [[[[1], [2]], [[3], [4]]]] + /// + /// + /// (2) For the following input of shape [4, 1, 1, 3], block_shape = [2, 2], and + /// crops = [[0, 0], [0, 0]]: + /// + /// + /// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + /// + /// + /// The output tensor has shape [1, 2, 2, 3] and value: + /// + /// + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// (3) For the following input of shape [4, 2, 2, 1], block_shape = [2, 2], and + /// crops = [[0, 0], [0, 0]]: + /// + /// + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// + /// + /// The output tensor has shape [1, 4, 4, 1] and value: + /// + /// + /// x = [[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]] + /// + /// + /// (4) For the following input of shape [8, 1, 3, 1], block_shape = [2, 2], and + /// crops = [[0, 0], [2, 0]]: + /// + /// + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// + /// + /// The output tensor has shape [2, 2, 4, 1] and value: + /// + /// + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BatchToSpaceND'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape + /// block_shape + [batch], interleaves these blocks back into the grid defined by + /// the spatial dimensions [1, ..., M], to obtain a result with the same rank as + /// the input. The spatial dimensions of this intermediate result are then + /// optionally cropped according to crops to produce the output. This is the + /// reverse of SpaceToBatch. See below for a precise description. + /// + public static Tensor batch_to_space_n_d (Tensor input, Tensor block_shape, Tensor crops, string name = "BatchToSpaceND") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["block_shape"] = block_shape; + dict["crops"] = crops; + var op = _op_def_lib._apply_op_helper("BatchToSpaceND", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the Bessel i0e function of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BesselI0e'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// 
+ /// 
+ /// Exponentially scaled modified Bessel function of order 0 defined as
+ /// bessel_i0e(x) = exp(-abs(x)) bessel_i0(x).
+ /// 
+ /// This function is faster and numerically more stable than bessel_i0(x).
+ /// 
+ public static Tensor bessel_i0e (Tensor x, string name = "BesselI0e")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("BesselI0e", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes the Bessel i1e function of x element-wise.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BesselI1e'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Exponentially scaled modified Bessel function of order 1 defined as
+ /// bessel_i1e(x) = exp(-abs(x)) bessel_i1(x).
+ /// 
+ /// This function is faster and numerically more stable than bessel_i1(x).
+ /// 
+ public static Tensor bessel_i1e (Tensor x, string name = "BesselI1e")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("BesselI1e", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Betainc'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// The regularized incomplete beta integral is defined as:
+ /// 
+ /// 
+ /// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
+ /// 
+ /// where
+ /// 
+ /// 
+ /// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
+ /// 
+ /// 
+ /// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
+ /// beta function.
+ /// 
+ public static Tensor betainc (Tensor a, Tensor b, Tensor x, string name = "Betainc")
+ {
+ var dict = new Dictionary();
+ dict["a"] = a;
+ dict["b"] = b;
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Betainc", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Adds bias to value.
+ /// 
+ /// 
+ /// Any number of dimensions.
+ /// 
+ /// 
+ /// 1-D with size the last dimension of value.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BiasAdd'.
+ /// 
+ /// 
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the bias tensor will be added to the last dimension
+ /// of the value tensor.
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, in_channels, in_height, in_width].
+ /// The tensor will be added to "in_channels", the third-to-the-last
+ /// dimension.
+ /// 
+ /// 
+ /// Broadcasted sum of value and bias.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This is a special case of tf.add where bias is restricted to be 1-D.
+ /// Broadcasting is supported, so value may have any number of dimensions.
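+ /// For example (illustrative values, NHWC-style layout):
+ /// 
+ /// value = [[1, 2, 3], [4, 5, 6]]   // shape [2, 3]
+ /// bias  = [10, 20, 30]             // shape [3]
+ /// bias_add(value, bias) == [[11, 22, 33], [14, 25, 36]]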
+ /// + public static Tensor bias_add (Tensor value, Tensor bias, string data_format = null, string name = "BiasAdd") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["bias"] = bias; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("BiasAdd", name: name, keywords: dict); + return op.output; + } + + /// + /// The backward operation for "BiasAdd" on the "bias" tensor. + /// + /// + /// Any number of dimensions. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BiasAddGrad'. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// + /// 1-D with size the feature dimension of out_backprop. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// It accumulates all the values from out_backprop into the feature dimension. + /// For NHWC data format, the feature dimension is the last. For NCHW data format, + /// the feature dimension is the third-to-last. + /// + public static Tensor bias_add_grad (Tensor out_backprop, string data_format = null, string name = "BiasAddGrad") + { + var dict = new Dictionary(); + dict["out_backprop"] = out_backprop; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("BiasAddGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Adds bias to value. + /// + /// + /// Any number of dimensions. + /// + /// + /// 1-D with size the last dimension of value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BiasAddV1'. + /// + /// + /// Broadcasted sum of value and bias. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is a deprecated version of BiasAdd and will be soon removed. + /// + /// This is a special case of tf.add where bias is restricted to be 1-D. + /// Broadcasting is supported, so value may have any number of dimensions. + /// + public static Tensor bias_add_v1 (Tensor value, Tensor bias, string name = "BiasAddV1") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["bias"] = bias; + var op = _op_def_lib._apply_op_helper("BiasAddV1", name: name, keywords: dict); + return op.output; + } + + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// int32 Tensor. + /// + /// + /// non-negative int32 scalar Tensor. + /// + /// + /// is an int32, int64, float32, or float64 Tensor with the same + /// shape as arr, or a length-0 Tensor, in which case it acts as all weights + /// equal to 1. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Bincount'. + /// + /// + /// 1D Tensor with length equal to size. The counts or summed weights for + /// each value in the range [0, size). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Outputs a vector with length size and the same dtype as weights. 
If + /// weights are empty, then index i stores the number of times the value i is + /// counted in arr. If weights are non-empty, then index i stores the sum of + /// the value in weights at each index where the corresponding value in arr is + /// i. + /// + /// Values in arr outside of the range [0, size) are ignored. + /// + public static Tensor bincount (Tensor arr, Tensor size, Tensor weights, string name = "Bincount") + { + var dict = new Dictionary(); + dict["arr"] = arr; + dict["size"] = size; + dict["weights"] = weights; + var op = _op_def_lib._apply_op_helper("Bincount", name: name, keywords: dict); + return op.output; + } + + /// + /// Bitcasts a tensor from one type to another without copying data. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Bitcast'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor input, this operation returns a tensor that has the same buffer + /// data as input with datatype type. + /// + /// If the input datatype T is larger than the output datatype type then the + /// shape changes from [...] to [..., sizeof(T)/sizeof(type)]. + /// + /// If T is smaller than type, the operator requires that the rightmost + /// dimension be equal to sizeof(type)/sizeof(T). The shape then goes from + /// [..., sizeof(type)/sizeof(T)] to [...]. + /// + /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different + /// endian orderings will give different results. + /// + public static Tensor bitcast (Tensor input, TF_DataType type, string name = "Bitcast") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["type"] = type; + var op = _op_def_lib._apply_op_helper("Bitcast", name: name, keywords: dict); + return op.output; + } + + /// + /// Elementwise computes the bitwise AND of x and y. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BitwiseAnd'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The result will have those bits set, that are set in both x and y. The + /// computation is performed on the underlying representations of x and y. + /// + public static Tensor bitwise_and (Tensor x, Tensor y, string name = "BitwiseAnd") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("BitwiseAnd", name: name, keywords: dict); + return op.output; + } + + /// + /// Elementwise computes the bitwise OR of x and y. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BitwiseOr'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The result will have those bits set, that are set in x, y or both. The + /// computation is performed on the underlying representations of x and y. + /// + public static Tensor bitwise_or (Tensor x, Tensor y, string name = "BitwiseOr") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("BitwiseOr", name: name, keywords: dict); + return op.output; + } + + /// + /// Elementwise computes the bitwise XOR of x and y. 
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BitwiseXor'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The result will have those bits set that differ between x and y. The
+ /// computation is performed on the underlying representations of x and y.
+ ///
+ public static Tensor bitwise_xor (Tensor x, Tensor y, string name = "BitwiseXor")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("BitwiseXor", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Calculates gains for each feature and returns the best possible split information for the feature.
+ ///
+ ///
+ /// A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within stats_summary_list. The nodes are iterated between the two nodes specified by the tensor, as in: for node_id in range(node_id_range[0], node_id_range[1]) (note that the last index node_id_range[1] is exclusive).
+ ///
+ ///
+ /// A list of Rank 3 tensors (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used; only the indexes specified by node_ids will be used.
+ ///
+ ///
+ /// l1 regularization factor on leaf weights, per instance based.
+ ///
+ ///
+ /// l2 regularization factor on leaf weights, per instance based.
+ ///
+ ///
+ /// adjustment to the gain, per leaf based.
+ ///
+ ///
+ /// minimum average of hessians in a node required before the node is considered for splitting.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesCalculateBestGainsPerFeature'.
+ ///
+ ///
+ /// Optional argument
+ /// the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// node_ids_list : An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
+ /// gains_list : An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
+ /// thresholds_list : An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
+ /// left_node_contribs_list : A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
+ /// right_node_contribs_list : A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but the values are for the right node.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
+ ///
+ /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return node_ids_list for each feature, containing the list of nodes that this feature can be used to split.
+ ///
+ /// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
+ ///
+ /// The output lists all have the same length, num_features.
+ /// The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
+ ///
+ public static (Tensor[] node_ids_list, Tensor[] gains_list, Tensor[] thresholds_list, Tensor[] left_node_contribs_list, Tensor[] right_node_contribs_list) boosted_trees_calculate_best_gains_per_feature (Tensor node_id_range, Tensor[] stats_summary_list, Tensor l1, Tensor l2, Tensor tree_complexity, Tensor min_node_weight, int max_splits, string name = "BoostedTreesCalculateBestGainsPerFeature")
+ {
+ var dict = new Dictionary();
+ dict["node_id_range"] = node_id_range;
+ dict["stats_summary_list"] = stats_summary_list;
+ dict["l1"] = l1;
+ dict["l2"] = l2;
+ dict["tree_complexity"] = tree_complexity;
+ dict["min_node_weight"] = min_node_weight;
+ dict["max_splits"] = max_splits;
+ var op = _op_def_lib._apply_op_helper("BoostedTreesCalculateBestGainsPerFeature", name: name, keywords: dict);
+ int _idx = 0;
+ var node_ids_list = Enumerable.Range(0, op.OutputListLength("node_ids_list")).Select(_ => op.outputs[_idx++]).ToArray();
+ var gains_list = Enumerable.Range(0, op.OutputListLength("gains_list")).Select(_ => op.outputs[_idx++]).ToArray();
+ var thresholds_list = Enumerable.Range(0, op.OutputListLength("thresholds_list")).Select(_ => op.outputs[_idx++]).ToArray();
+ var left_node_contribs_list = Enumerable.Range(0, op.OutputListLength("left_node_contribs_list")).Select(_ => op.outputs[_idx++]).ToArray();
+ var right_node_contribs_list = Enumerable.Range(0, op.OutputListLength("right_node_contribs_list")).Select(_ => op.outputs[_idx++]).ToArray();
+ return (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list);
+ }
+
+ ///
+ /// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
+ ///
+ ///
+ /// Handle to the tree ensemble.
+ ///
+ ///
+ /// A tensor with shape=[logits_dimension] with the mean of gradients for the first node.
+ ///
+ ///
+ /// A tensor with shape=[logits_dimension] with the mean of hessians for the first node.
+ ///
+ ///
+ /// l1 regularization factor on leaf weights, per instance based.
+ ///
+ ///
+ /// l2 regularization factor on leaf weights, per instance based.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesCenterBias'.
+ ///
+ ///
+ /// Bool, whether to continue bias centering.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
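+ ///
+ /// A usage sketch (illustrative only; the handle and the two mean Tensors are
+ /// assumed to be built elsewhere, and the scalar l1/l2 Tensors are hypothetical):
+ ///
+ /// var continue_centering = gen_ops.boosted_trees_center_bias(
+ ///     ensemble_handle, mean_gradients, mean_hessians, l1, l2);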
+ /// + public static Tensor boosted_trees_center_bias (Tensor tree_ensemble_handle, Tensor mean_gradients, Tensor mean_hessians, Tensor l1, Tensor l2, string name = "BoostedTreesCenterBias") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + dict["mean_gradients"] = mean_gradients; + dict["mean_hessians"] = mean_hessians; + dict["l1"] = l1; + dict["l2"] = l2; + var op = _op_def_lib._apply_op_helper("BoostedTreesCenterBias", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a tree ensemble model and returns a handle to it. + /// + /// + /// Handle to the tree ensemble resource to be created. + /// + /// + /// Token to use as the initial value of the resource stamp. + /// + /// + /// Serialized proto of the tree ensemble. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesCreateEnsemble'. + /// + /// + /// Returns the description of the operation + /// + public static Operation boosted_trees_create_ensemble (Tensor tree_ensemble_handle, Tensor stamp_token, Tensor tree_ensemble_serialized, string name = "BoostedTreesCreateEnsemble") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + dict["stamp_token"] = stamp_token; + dict["tree_ensemble_serialized"] = tree_ensemble_serialized; + var op = _op_def_lib._apply_op_helper("BoostedTreesCreateEnsemble", name: name, keywords: dict); + return op; + } + + /// + /// Deserializes a serialized tree ensemble config and replaces current tree + /// + /// + /// Handle to the tree ensemble. + /// + /// + /// Token to use as the new value of the resource stamp. + /// + /// + /// Serialized proto of the ensemble. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesDeserializeEnsemble'. + /// + /// + /// Returns the description of the operation + /// + /// + /// ensemble. + /// + public static Operation boosted_trees_deserialize_ensemble (Tensor tree_ensemble_handle, Tensor stamp_token, Tensor tree_ensemble_serialized, string name = "BoostedTreesDeserializeEnsemble") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + dict["stamp_token"] = stamp_token; + dict["tree_ensemble_serialized"] = tree_ensemble_serialized; + var op = _op_def_lib._apply_op_helper("BoostedTreesDeserializeEnsemble", name: name, keywords: dict); + return op; + } + + /// + /// Creates a handle to a BoostedTreesEnsembleResource + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesEnsembleResourceHandleOp'. + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor boosted_trees_ensemble_resource_handle_op (string container = null, string shared_name = null, string name = "BoostedTreesEnsembleResourceHandleOp") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("BoostedTreesEnsembleResourceHandleOp", name: name, keywords: dict); + return op.output; + } + + /// + /// Debugging/model interpretability outputs for each example. + /// + /// + /// + /// + /// A list of rank 1 Tensors containing bucket id for each + /// feature. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesExampleDebugOutputs'. + /// + /// + /// Optional argument + /// scalar, dimension of the logits, to be used for constructing the protos in + /// examples_debug_outputs_serialized. + /// + /// + /// Output rank 1 Tensor containing a proto serialized as a string for each example. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// It traverses all the trees and computes debug metrics for individual examples, + /// such as getting split feature ids and logits after each split along the decision + /// path used to compute directional feature contributions. + /// + public static Tensor boosted_trees_example_debug_outputs (Tensor tree_ensemble_handle, Tensor[] bucketized_features, int logits_dimension, string name = "BoostedTreesExampleDebugOutputs") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + dict["bucketized_features"] = bucketized_features; + dict["logits_dimension"] = logits_dimension; + var op = _op_def_lib._apply_op_helper("BoostedTreesExampleDebugOutputs", name: name, keywords: dict); + return op.output; + } + + /// + /// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. + /// + /// + /// Handle to the tree ensemble. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesGetEnsembleStates'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// stamp_token : Stamp token of the tree ensemble resource. + /// num_trees : The number of trees in the tree ensemble resource. + /// num_finalized_trees : The number of trees that were finished successfully. + /// num_attempted_layers : The number of layers we attempted to build (but not necessarily succeeded). + /// last_layer_nodes_range : Rank size 2 tensor that contains start and end ids of the nodes in the latest + /// layer. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor stamp_token, Tensor num_trees, Tensor num_finalized_trees, Tensor num_attempted_layers, Tensor last_layer_nodes_range) boosted_trees_get_ensemble_states (Tensor tree_ensemble_handle, string name = "BoostedTreesGetEnsembleStates") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + var op = _op_def_lib._apply_op_helper("BoostedTreesGetEnsembleStates", name: name, keywords: dict); + int _idx = 0; + var stamp_token = op.outputs[_idx++]; + var num_trees = op.outputs[_idx++]; + var num_finalized_trees = op.outputs[_idx++]; + var num_attempted_layers = op.outputs[_idx++]; + var last_layer_nodes_range = op.outputs[_idx++]; + return (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range); + } + + /// + /// Makes the summary of accumulated stats for the batch. + /// + /// + /// int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. + /// + /// + /// float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. + /// + /// + /// float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. + /// + /// + /// int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). 
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesMakeStatsSummary'.
+ ///
+ ///
+ /// Optional argument
+ /// int; the maximum number of splits possible in the whole tree.
+ ///
+ ///
+ /// Optional argument
+ /// int; equal to the maximum possible value of the bucketized feature.
+ ///
+ ///
+ /// output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of the 4th dimension refers to gradients, and the second to hessians.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The summary stats contain gradients and hessians accumulated into the corresponding node and bucket for each example.
+ ///
+ public static Tensor boosted_trees_make_stats_summary (Tensor node_ids, Tensor gradients, Tensor hessians, Tensor[] bucketized_features_list, int max_splits, int num_buckets, string name = "BoostedTreesMakeStatsSummary")
+ {
+ var dict = new Dictionary();
+ dict["node_ids"] = node_ids;
+ dict["gradients"] = gradients;
+ dict["hessians"] = hessians;
+ dict["bucketized_features_list"] = bucketized_features_list;
+ dict["max_splits"] = max_splits;
+ dict["num_buckets"] = num_buckets;
+ var op = _op_def_lib._apply_op_helper("BoostedTreesMakeStatsSummary", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Runs multiple additive regression ensemble predictors on input instances and computes the logits.
+ ///
+ ///
+ ///
+ ///
+ /// A list of rank 1 Tensors containing bucket id for each
+ /// feature.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesPredict'.
+ ///
+ ///
+ /// Optional argument
+ /// scalar, dimension of the logits, to be used for partial logits
+ /// shape.
+ ///
+ ///
+ /// Output rank 2 Tensor containing logits for each example.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// It is designed to be used during prediction.
+ /// It traverses all the trees and calculates the final score for each instance.
+ ///
+ public static Tensor boosted_trees_predict (Tensor tree_ensemble_handle, Tensor[] bucketized_features, int logits_dimension, string name = "BoostedTreesPredict")
+ {
+ var dict = new Dictionary();
+ dict["tree_ensemble_handle"] = tree_ensemble_handle;
+ dict["bucketized_features"] = bucketized_features;
+ dict["logits_dimension"] = logits_dimension;
+ var op = _op_def_lib._apply_op_helper("BoostedTreesPredict", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Serializes the tree ensemble to a proto.
+ ///
+ ///
+ /// Handle to the tree ensemble.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesSerializeEnsemble'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// stamp_token : Stamp token of the tree ensemble resource.
+ /// tree_ensemble_serialized : Serialized proto of the ensemble.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
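+ ///
+ /// A usage sketch (illustrative only; tree_ensemble_handle is an assumed
+ /// pre-existing resource Tensor). The named tuple can be deconstructed directly:
+ ///
+ /// var (stamp, serialized) = gen_ops.boosted_trees_serialize_ensemble(tree_ensemble_handle);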
+ ///
+ public static (Tensor stamp_token, Tensor tree_ensemble_serialized) boosted_trees_serialize_ensemble (Tensor tree_ensemble_handle, string name = "BoostedTreesSerializeEnsemble")
+ {
+ var dict = new Dictionary();
+ dict["tree_ensemble_handle"] = tree_ensemble_handle;
+ var op = _op_def_lib._apply_op_helper("BoostedTreesSerializeEnsemble", name: name, keywords: dict);
+ int _idx = 0;
+ var stamp_token = op.outputs[_idx++];
+ var tree_ensemble_serialized = op.outputs[_idx++];
+ return (stamp_token, tree_ensemble_serialized);
+ }
+
+ ///
+ /// Runs multiple additive regression ensemble predictors on input instances and computes the update to cached logits.
+ ///
+ ///
+ ///
+ ///
+ /// Rank 1 Tensor containing cached tree ids which is the starting
+ /// tree of prediction.
+ ///
+ ///
+ /// Rank 1 Tensor containing cached node id which is the starting
+ /// node of prediction.
+ ///
+ ///
+ /// A list of rank 1 Tensors containing bucket id for each
+ /// feature.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesTrainingPredict'.
+ ///
+ ///
+ /// Optional argument
+ /// scalar, dimension of the logits, to be used for partial logits
+ /// shape.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// partial_logits : Rank 2 Tensor containing logits update (with respect to cached
+ /// values stored) for each example.
+ /// tree_ids : Rank 1 Tensor containing new tree ids for each example.
+ /// node_ids : Rank 1 Tensor containing new node ids in the new tree_ids.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// It is designed to be used during training.
+ /// It traverses the trees starting from cached tree id and cached node id and
+ /// calculates the updates to be pushed to the cache.
+ ///
+ public static (Tensor partial_logits, Tensor tree_ids, Tensor node_ids) boosted_trees_training_predict (Tensor tree_ensemble_handle, Tensor cached_tree_ids, Tensor cached_node_ids, Tensor[] bucketized_features, int logits_dimension, string name = "BoostedTreesTrainingPredict")
+ {
+ var dict = new Dictionary();
+ dict["tree_ensemble_handle"] = tree_ensemble_handle;
+ dict["cached_tree_ids"] = cached_tree_ids;
+ dict["cached_node_ids"] = cached_node_ids;
+ dict["bucketized_features"] = bucketized_features;
+ dict["logits_dimension"] = logits_dimension;
+ var op = _op_def_lib._apply_op_helper("BoostedTreesTrainingPredict", name: name, keywords: dict);
+ int _idx = 0;
+ var partial_logits = op.outputs[_idx++];
+ var tree_ids = op.outputs[_idx++];
+ var node_ids = op.outputs[_idx++];
+ return (partial_logits, tree_ids, node_ids);
+ }
+
+ ///
+ /// Updates the tree ensemble by either adding a layer to the last tree being grown
+ ///
+ ///
+ /// Handle to the ensemble variable.
+ ///
+ ///
+ /// Rank 1 tensor with ids for each feature. This is the real id of
+ /// the feature that will be used in the split.
+ ///
+ ///
+ /// List of rank 1 tensors representing the nodes for which this feature
+ /// has a split.
+ ///
+ ///
+ /// List of rank 1 tensors representing the gains for each of the feature's
+ /// splits.
+ ///
+ ///
+ /// List of rank 1 tensors representing the thresholds for each of the
+ /// feature's splits.
+ ///
+ ///
+ /// List of rank 2 tensors with left leaf contribs for each of
+ /// the feature's splits. Will be added to the previous node values to constitute
+ /// the values of the left nodes.
+ /// + /// + /// List of rank 2 tensors with right leaf contribs for each + /// of the feature's splits. Will be added to the previous node values to constitute + /// the values of the right nodes. + /// + /// + /// Max depth of the tree to build. + /// + /// + /// shrinkage const for each new tree. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BoostedTreesUpdateEnsemble'. + /// + /// + /// Optional argument + /// 0-No pruning, 1-Pre-pruning, 2-Post-pruning. + /// + /// + /// Returns the description of the operation + /// + /// + /// or by starting a new tree. + /// + public static Operation boosted_trees_update_ensemble (Tensor tree_ensemble_handle, Tensor feature_ids, Tensor[] node_ids, Tensor[] gains, Tensor[] thresholds, Tensor[] left_node_contribs, Tensor[] right_node_contribs, Tensor max_depth, Tensor learning_rate, int pruning_mode, string name = "BoostedTreesUpdateEnsemble") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + dict["feature_ids"] = feature_ids; + dict["node_ids"] = node_ids; + dict["gains"] = gains; + dict["thresholds"] = thresholds; + dict["left_node_contribs"] = left_node_contribs; + dict["right_node_contribs"] = right_node_contribs; + dict["max_depth"] = max_depth; + dict["learning_rate"] = learning_rate; + dict["pruning_mode"] = pruning_mode; + var op = _op_def_lib._apply_op_helper("BoostedTreesUpdateEnsemble", name: name, keywords: dict); + return op; + } + + /// + /// Return the shape of s0 op s1 with broadcast. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BroadcastArgs'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given s0 and s1, tensors that represent shapes, compute r0, the + /// broadcasted shape. s0, s1 and r0 are all integer vectors. + /// + public static Tensor broadcast_args (Tensor s0, Tensor s1, string name = "BroadcastArgs") + { + var dict = new Dictionary(); + dict["s0"] = s0; + dict["s1"] = s1; + var op = _op_def_lib._apply_op_helper("BroadcastArgs", name: name, keywords: dict); + return op.output; + } + + /// + /// Return the reduction indices for computing gradients of s0 op s1 with broadcast. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BroadcastGradientArgs'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// r0 : + /// r1 : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// This is typically used by gradient computations for a broadcasting operation. + /// + public static (Tensor r0, Tensor r1) broadcast_gradient_args (Tensor s0, Tensor s1, string name = "BroadcastGradientArgs") + { + var dict = new Dictionary(); + dict["s0"] = s0; + dict["s1"] = s1; + var op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name: name, keywords: dict); + int _idx = 0; + var r0 = op.outputs[_idx++]; + var r1 = op.outputs[_idx++]; + return (r0, r1); + } + + /// + /// Broadcast an array for a compatible shape. + /// + /// + /// A Tensor to broadcast. + /// + /// + /// An 1-D int Tensor. The shape of the desired output. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BroadcastTo'. 
+ /// + /// + /// A Tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Broadcasting is the process of making arrays to have compatible shapes + /// for arithmetic operations. Two shapes are compatible if for each + /// dimension pair they are either equal or one of them is one. When trying + /// to broadcast a Tensor to a shape, it starts with the trailing dimensions, + /// and works its way forward. + /// + /// For example, + /// + /// >>> x = tf.constant([1, 2, 3]) + /// >>> y = tf.broadcast_to(x, [3, 3]) + /// >>> sess.run(y) + /// array([[1, 2, 3], + /// [1, 2, 3], + /// [1, 2, 3]], dtype=int32) + /// + /// In the above example, the input Tensor with the shape of [1, 3] + /// is broadcasted to output Tensor with shape of [3, 3]. + /// + public static Tensor broadcast_to (Tensor input, Tensor shape, string name = "BroadcastTo") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("BroadcastTo", name: name, keywords: dict); + return op.output; + } + + /// + /// Bucketizes 'input' based on 'boundaries'. + /// + /// + /// Any shape of Tensor contains with int or float type. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Bucketize'. + /// + /// + /// Optional argument + /// A sorted list of floats gives the boundary of the buckets. + /// + /// + /// Same shape with 'input', each value of input replaced with bucket index. + /// + /// @compatibility(numpy) + /// Equivalent to np.digitize. + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// For example, if the inputs are + /// boundaries = [0, 10, 100] + /// input = [[-5, 10000] + /// [150, 10] + /// [5, 100]] + /// + /// then the output will be + /// output = [[0, 3] + /// [3, 2] + /// [1, 3]] + /// + public static Tensor bucketize (Tensor input, float[] boundaries, string name = "Bucketize") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["boundaries"] = boundaries; + var op = _op_def_lib._apply_op_helper("Bucketize", name: name, keywords: dict); + return op.output; + } + + /// + /// Records the bytes size of each element of input_dataset in a StatsAggregator. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'BytesProducedStatsDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor bytes_produced_stats_dataset (Tensor input_dataset, Tensor tag, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BytesProducedStatsDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["tag"] = tag; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("BytesProducedStatsDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs beam search decoding on the logits given in input. + /// + /// + /// 3-D, shape: (max_time x batch_size x num_classes), the logits. + /// + /// + /// A vector containing sequence lengths, size (batch). 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CTCBeamSearchDecoder'. + /// + /// + /// Optional argument + /// A scalar >= 0 (beam search beam width). + /// + /// + /// Optional argument + /// A scalar >= 0, <= beam_width (controls output size). + /// + /// + /// If true, merge repeated classes in output. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// decoded_indices : A list (length: top_paths) of indices matrices. Matrix j, + /// size (total_decoded_outputs[j] x 2), has indices of a + /// SparseTensor<int64, 2>. The rows store: [batch, time]. + /// decoded_values : A list (length: top_paths) of values vectors. Vector j, + /// size (length total_decoded_outputs[j]), has the values of a + /// SparseTensor<int64, 2>. The vector stores the decoded classes for beam j. + /// decoded_shape : A list (length: top_paths) of shape vector. Vector j, + /// size (2), stores the shape of the decoded SparseTensor[j]. + /// Its values are: [batch_size, max_decoded_length[j]]. + /// log_probability : A matrix, shaped: (batch_size x top_paths). The + /// sequence log-probabilities. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// A note about the attribute merge_repeated: For the beam search decoder, + /// this means that if consecutive entries in a beam are the same, only + /// the first of these is emitted. That is, when the top path is "A B B B B", + /// "A B" is returned if merge_repeated = True but "A B B B B" is + /// returned if merge_repeated = False. + /// + public static (Tensor[] decoded_indices, Tensor[] decoded_values, Tensor[] decoded_shape, Tensor log_probability) c_t_c_beam_search_decoder (Tensor inputs, Tensor sequence_length, int beam_width, int top_paths, bool? merge_repeated = null, string name = "CTCBeamSearchDecoder") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + dict["sequence_length"] = sequence_length; + dict["beam_width"] = beam_width; + dict["top_paths"] = top_paths; + if (merge_repeated.HasValue) + dict["merge_repeated"] = merge_repeated.Value; + var op = _op_def_lib._apply_op_helper("CTCBeamSearchDecoder", name: name, keywords: dict); + int _idx = 0; + var decoded_indices = Enumerable.Range(0, op.OutputListLength("decoded_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var decoded_values = Enumerable.Range(0, op.OutputListLength("decoded_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var decoded_shape = Enumerable.Range(0, op.OutputListLength("decoded_shape")).Select(_ => op.outputs[_idx++]).ToArray(); + var log_probability = op.outputs[_idx++]; + return (decoded_indices, decoded_values, decoded_shape, log_probability); + } + + /// + /// Performs greedy decoding on the logits given in inputs. + /// + /// + /// 3-D, shape: (max_time x batch_size x num_classes), the logits. + /// + /// + /// A vector containing sequence lengths, size (batch_size). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CTCGreedyDecoder'. + /// + /// + /// If True, merge repeated classes in output. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// decoded_indices : Indices matrix, size (total_decoded_outputs x 2), + /// of a SparseTensor<int64, 2>. The rows store: [batch, time]. + /// decoded_values : Values vector, size: (total_decoded_outputs), + /// of a SparseTensor<int64, 2>. 
The vector stores the decoded classes. + /// decoded_shape : Shape vector, size (2), of the decoded SparseTensor. + /// Values are: [batch_size, max_decoded_length]. + /// log_probability : Matrix, size (batch_size x 1), containing sequence + /// log-probabilities. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// A note about the attribute merge_repeated: if enabled, when + /// consecutive logits' maximum indices are the same, only the first of + /// these is emitted. Labeling the blank '*', the sequence "A B B * B B" + /// becomes "A B B" if merge_repeated = True and "A B B B B" if + /// merge_repeated = False. + /// + /// Regardless of the value of merge_repeated, if the maximum index of a given + /// time and batch corresponds to the blank, index (num_classes - 1), no new + /// element is emitted. + /// + public static (Tensor decoded_indices, Tensor decoded_values, Tensor decoded_shape, Tensor log_probability) c_t_c_greedy_decoder (Tensor inputs, Tensor sequence_length, bool? merge_repeated = null, string name = "CTCGreedyDecoder") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + dict["sequence_length"] = sequence_length; + if (merge_repeated.HasValue) + dict["merge_repeated"] = merge_repeated.Value; + var op = _op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, keywords: dict); + int _idx = 0; + var decoded_indices = op.outputs[_idx++]; + var decoded_values = op.outputs[_idx++]; + var decoded_shape = op.outputs[_idx++]; + var log_probability = op.outputs[_idx++]; + return (decoded_indices, decoded_values, decoded_shape, log_probability); + } + + /// + /// Calculates the CTC Loss (log probability) for each batch entry. Also calculates + /// + /// + /// 3-D, shape: (max_time x batch_size x num_classes), the logits. + /// + /// + /// The indices of a SparseTensor<int32, 2>. + /// labels_indices(i, :) == [b, t] means labels_values(i) stores the id for + /// (batch b, time t). + /// + /// + /// The values (labels) associated with the given batch and time. + /// + /// + /// A vector containing sequence lengths (batch). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CTCLoss'. + /// + /// + /// Scalar, if true then repeated labels are + /// collapsed prior to the CTC calculation. + /// + /// + /// Scalar. If set to false, *during* CTC calculation + /// repeated non-blank labels will not be merged and are interpreted as + /// individual labels. This is a simplified version of CTC. + /// + /// + /// Scalar. If set to true, during CTC + /// calculation, items that have longer output sequences than input sequences + /// are skipped: they don't contribute to the loss term and have zero-gradient. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// loss : A vector (batch) containing log-probabilities. + /// gradient : The gradient of loss. 3-D, shape: + /// (max_time x batch_size x num_classes). + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// the gradient. This class performs the softmax operation for you, so inputs + /// should be e.g. linear projections of outputs by an LSTM. + /// + public static (Tensor loss, Tensor gradient) c_t_c_loss (Tensor inputs, Tensor labels_indices, Tensor labels_values, Tensor sequence_length, bool? preprocess_collapse_repeated = null, bool? ctc_merge_repeated = null, bool? 
ignore_longer_outputs_than_inputs = null, string name = "CTCLoss")
+ {
+ var dict = new Dictionary();
+ dict["inputs"] = inputs;
+ dict["labels_indices"] = labels_indices;
+ dict["labels_values"] = labels_values;
+ dict["sequence_length"] = sequence_length;
+ if (preprocess_collapse_repeated.HasValue)
+ dict["preprocess_collapse_repeated"] = preprocess_collapse_repeated.Value;
+ if (ctc_merge_repeated.HasValue)
+ dict["ctc_merge_repeated"] = ctc_merge_repeated.Value;
+ if (ignore_longer_outputs_than_inputs.HasValue)
+ dict["ignore_longer_outputs_than_inputs"] = ignore_longer_outputs_than_inputs.Value;
+ var op = _op_def_lib._apply_op_helper("CTCLoss", name: name, keywords: dict);
+ int _idx = 0;
+ var loss = op.outputs[_idx++];
+ var gradient = op.outputs[_idx++];
+ return (loss, gradient);
+ }
+
+ ///
+ /// Creates a dataset that caches elements from input_dataset.
+ ///
+ ///
+ ///
+ ///
+ /// A path on the filesystem where we should cache the dataset. Note: this
+ /// will be a directory.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CacheDataset'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// A CacheDataset will iterate over the input_dataset, and store tensors. If the
+ /// cache already exists, the cache will be used. If the cache is inappropriate
+ /// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
+ /// will be returned when used.
+ ///
+ public static Tensor cache_dataset (Tensor input_dataset, Tensor filename, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "CacheDataset")
+ {
+ var dict = new Dictionary();
+ dict["input_dataset"] = input_dataset;
+ dict["filename"] = filename;
+ dict["output_types"] = output_types;
+ dict["output_shapes"] = output_shapes;
+ var op = _op_def_lib._apply_op_helper("CacheDataset", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Cast x of type SrcT to y of DstT.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cast'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor cast (Tensor x, TF_DataType DstT, bool? Truncate = null, string name = "Cast")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["DstT"] = DstT;
+ if (Truncate.HasValue)
+ dict["Truncate"] = Truncate.Value;
+ var op = _op_def_lib._apply_op_helper("Cast", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns element-wise smallest integer not less than x.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Ceil'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor ceil (Tensor x, string name = "Ceil")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Ceil", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Checks a tensor for NaN and Inf values.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CheckNumerics'.
+ /// + /// + /// Optional argument + /// Prefix of the error message. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// When run, reports an InvalidArgument error if tensor has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is. + /// + public static Tensor check_numerics (Tensor tensor, string message, string name = "CheckNumerics") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + dict["message"] = message; + var op = _op_def_lib._apply_op_helper("CheckNumerics", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the Cholesky decomposition of one or more square matrices. + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cholesky'. + /// + /// + /// Shape is [..., M, M]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions + /// form square matrices. + /// + /// The input has to be symmetric and positive definite. Only the lower-triangular + /// part of the input will be used for this operation. The upper-triangular part + /// will not be read. + /// + /// The output is a tensor of the same shape as the input + /// containing the Cholesky decompositions for all input submatrices [..., :, :]. + /// + /// **Note**: The gradient computation on GPU is faster for large matrices but + /// not for large batch dimensions when the submatrices are small. In this + /// case it might be faster to use the CPU. + /// + public static Tensor cholesky (Tensor input, string name = "Cholesky") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("Cholesky", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the reverse mode backpropagated gradient of the Cholesky algorithm. + /// + /// + /// Output of batch Cholesky algorithm l = cholesky(A). Shape is [..., M, M]. + /// Algorithm depends only on lower triangular part of the innermost matrices of + /// this tensor. + /// + /// + /// df/dl where f is some scalar function. Shape is [..., M, M]. + /// Algorithm depends only on lower triangular part of the innermost matrices of + /// this tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CholeskyGrad'. + /// + /// + /// Symmetrized version of df/dA . Shape is [..., M, M] + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// For an explanation see "Differentiation of the Cholesky algorithm" by + /// Iain Murray http://arxiv.org/abs/1602.07527. + /// + public static Tensor cholesky_grad (Tensor l, Tensor grad, string name = "CholeskyGrad") + { + var dict = new Dictionary(); + dict["l"] = l; + dict["grad"] = grad; + var op = _op_def_lib._apply_op_helper("CholeskyGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Clips tensor values to a specified min and max. + /// + /// + /// A Tensor. + /// + /// + /// A 0-D (scalar) Tensor, or a Tensor with the same shape + /// as t. The minimum value to clip by. + /// + /// + /// A 0-D (scalar) Tensor, or a Tensor with the same shape + /// as t. The maximum value to clip by. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ClipByValue'. + /// + /// + /// A clipped Tensor with the same shape as input 't'. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor t, this operation returns a tensor of the same type and + /// shape as t with its values clipped to clip_value_min and clip_value_max. + /// Any values less than clip_value_min are set to clip_value_min. Any values + /// greater than clip_value_max are set to clip_value_max. + /// + public static Tensor clip_by_value (Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue") + { + var dict = new Dictionary(); + dict["t"] = t; + dict["clip_value_min"] = clip_value_min; + dict["clip_value_max"] = clip_value_max; + var op = _op_def_lib._apply_op_helper("ClipByValue", name: name, keywords: dict); + return op.output; + } + + /// + /// Receives a tensor value broadcast from another device. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CollectiveBcastRecv'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor collective_bcast_recv (TF_DataType T, int group_size, int group_key, int instance_key, TensorShape shape, string name = "CollectiveBcastRecv") + { + var dict = new Dictionary(); + dict["T"] = T; + dict["group_size"] = group_size; + dict["group_key"] = group_key; + dict["instance_key"] = instance_key; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("CollectiveBcastRecv", name: name, keywords: dict); + return op.output; + } + + /// + /// Broadcasts a tensor value to one or more other devices. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CollectiveBcastSend'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor collective_bcast_send (Tensor input, int group_size, int group_key, int instance_key, TensorShape shape, string name = "CollectiveBcastSend") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["group_size"] = group_size; + dict["group_key"] = group_key; + dict["instance_key"] = instance_key; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("CollectiveBcastSend", name: name, keywords: dict); + return op.output; + } + + /// + /// Mutually reduces multiple tensors of identical type and shape. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CollectiveReduce'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
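+ ///
+ /// A usage sketch (illustrative only; the group/instance values are assumptions,
+ /// and "Add"/"Id" are the usual merge_op/final_op strings for a plain sum):
+ ///
+ /// var summed = gen_ops.collective_reduce(input,
+ ///     group_size: 2, group_key: 1, instance_key: 7,
+ ///     merge_op: "Add", final_op: "Id", subdiv_offsets: new[] { 0 });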
+ ///
+ public static Tensor collective_reduce (Tensor input, int group_size, int group_key, int instance_key, string merge_op, string final_op, int[] subdiv_offsets, string name = "CollectiveReduce")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["group_size"] = group_size;
+ dict["group_key"] = group_key;
+ dict["instance_key"] = instance_key;
+ dict["merge_op"] = merge_op;
+ dict["final_op"] = final_op;
+ dict["subdiv_offsets"] = subdiv_offsets;
+ var op = _op_def_lib._apply_op_helper("CollectiveReduce", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Compare values of input to threshold and pack resulting bits into a uint8.
+ ///
+ ///
+ /// Values to compare against threshold and bitpack.
+ ///
+ ///
+ /// Threshold to compare against.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CompareAndBitpack'.
+ ///
+ ///
+ /// The bitpacked comparisons.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Each comparison returns a boolean true (if input_value > threshold)
+ /// and false otherwise.
+ ///
+ /// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
+ /// algorithms that use hashing approximations of cosine and L2 distances;
+ /// codes can be generated from an input via:
+ ///
+ ///
+ /// codebook_size = 50
+ /// codebook_bits = codebook_size * 32
+ /// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
+ /// dtype=x.dtype,
+ /// initializer=tf.orthogonal_initializer())
+ /// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
+ /// codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
+ /// # now codes has shape x.shape[:-1] + [codebook_size]
+ ///
+ ///
+ /// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
+ /// by 8.
+ ///
+ /// Given an input shaped [s0, s1, ..., s_n], the output is
+ /// a uint8 tensor shaped [s0, s1, ..., s_n / 8].
+ ///
+ public static Tensor compare_and_bitpack (Tensor input, Tensor threshold, string name = "CompareAndBitpack")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["threshold"] = threshold;
+ var op = _op_def_lib._apply_op_helper("CompareAndBitpack", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Converts two real numbers to a complex number.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Complex'.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor real representing the real part of a complex number, and a
+ /// tensor imag representing the imaginary part of a complex number, this
+ /// operation returns complex numbers elementwise of the form \\(a + bj\\), where
+ /// *a* represents the real part and *b* represents the imaginary part.
+ ///
+ /// The input tensors real and imag must have the same shape.
+ ///
+ /// For example:
+ ///
+ ///
+ /// # tensor 'real' is [2.25, 3.25]
+ /// # tensor imag is [4.75, 5.75]
+ /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
+ ///
+ ///
+ public static Tensor complex (Tensor real, Tensor imag, TF_DataType?
Tout = null, string name = "Complex")
+ {
+ var dict = new Dictionary();
+ dict["real"] = real;
+ dict["imag"] = imag;
+ if (Tout.HasValue)
+ dict["Tout"] = Tout.Value;
+ var op = _op_def_lib._apply_op_helper("Complex", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the complex absolute value of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ComplexAbs'.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor x of complex numbers, this operation returns a tensor of type
+ /// float or double that is the absolute value of each element in x. All
+ /// elements in x must be complex numbers of the form \\(a + bj\\). The absolute
+ /// value is computed as \\( \sqrt{a^2 + b^2}\\).
+ ///
+ public static Tensor complex_abs (Tensor x, TF_DataType? Tout = null, string name = "ComplexAbs")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ if (Tout.HasValue)
+ dict["Tout"] = Tout.Value;
+ var op = _op_def_lib._apply_op_helper("ComplexAbs", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the ids of the positions in sampled_candidates that match true_labels.
+ ///
+ ///
+ /// The true_classes output of UnpackSparseLabels.
+ ///
+ ///
+ /// The sampled_candidates output of CandidateSampler.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ComputeAccidentalHits'.
+ ///
+ ///
+ /// Optional argument
+ /// Number of true labels per context.
+ ///
+ ///
+ /// If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// indices : A vector of indices corresponding to rows of true_candidates.
+ /// ids : A vector of IDs of positions in sampled_candidates that match a true_label
+ /// for the row with the corresponding index in indices.
+ /// weights : A vector of the same length as indices and ids, in which each element
+ /// is -FLOAT_MAX.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// When doing log-odds NCE, the result of this op should be passed through a
+ /// SparseToDense op, then added to the logits of the sampled candidates. This has
+ /// the effect of 'removing' the sampled labels that match the true labels by
+ /// making the classifier sure that they are sampled labels.
+ ///
+ public static (Tensor indices, Tensor ids, Tensor weights) compute_accidental_hits (Tensor true_classes, Tensor sampled_candidates, int num_true, int? seed = null, int? seed2 = null, string name = "ComputeAccidentalHits")
+ {
+ var dict = new Dictionary();
+ dict["true_classes"] = true_classes;
+ dict["sampled_candidates"] = sampled_candidates;
+ dict["num_true"] = num_true;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("ComputeAccidentalHits", name: name, keywords: dict);
+ int _idx = 0;
+ var indices = op.outputs[_idx++];
+ var ids = op.outputs[_idx++];
+ var weights = op.outputs[_idx++];
+ return (indices, ids, weights);
+ }
+
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ ///
+ /// 0-D. The dimension along which to concatenate. Must be in the
+ /// range [0, rank(values)).
+ ///
+ ///
+ /// The N Tensors to concatenate. Their ranks and types must match,
+ /// and their sizes must match in all dimensions except concat_dim.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Concat'.
+ ///
+ ///
+ /// A Tensor with the concatenation of values stacked along the
+ /// concat_dim dimension. This tensor's shape matches that of values except
+ /// in concat_dim where it has the sum of the sizes.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor concat (Tensor concat_dim, Tensor[] values, string name = "Concat")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["concat_dim"] = concat_dim;
+ dict["values"] = values;
+ var op = _op_def_lib._apply_op_helper("Concat", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes offsets of concat inputs within its output.
+ ///
+ ///
+ /// The dimension along which to concatenate.
+ ///
+ ///
+ /// The N int32 vectors representing shape of tensors being concatenated.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConcatOffset'.
+ ///
+ ///
+ /// The N int32 vectors representing the starting offset
+ /// of input tensors within the concatenated output.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 'x' is [2, 2, 7]
+ /// # 'y' is [2, 3, 7]
+ /// # 'z' is [2, 5, 7]
+ /// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
+ ///
+ ///
+ /// This is typically used by gradient computations for a concat operation.
+ ///
+ public static Tensor[] concat_offset (Tensor concat_dim, Tensor[] shape, string name = "ConcatOffset")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["concat_dim"] = concat_dim;
+ dict["shape"] = shape;
+ var op = _op_def_lib._apply_op_helper("ConcatOffset", name: name, keywords: dict);
+ int _idx = 0;
+ var offset = Enumerable.Range(0, op.OutputListLength("offset")).Select(_ => op.outputs[_idx++]).ToArray();
+ return (offset);
+ }
+
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ ///
+ /// List of N Tensors to concatenate. Their ranks and types must match,
+ /// and their sizes must match in all dimensions except concat_dim.
+ ///
+ ///
+ /// 0-D. The dimension along which to concatenate. Must be in the
+ /// range [-rank(values), rank(values)).
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConcatV2'.
+ ///
+ ///
+ /// A Tensor with the concatenation of values stacked along the
+ /// concat_dim dimension. This tensor's shape matches that of values except
+ /// in concat_dim where it has the sum of the sizes.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor concat_v2 (Tensor[] values, Tensor axis, string name = "ConcatV2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["values"] = values;
+ dict["axis"] = axis;
+ var op = _op_def_lib._apply_op_helper("ConcatV2", name: name, keywords: dict);
+ return op.output;
+ }
+
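+ // Usage sketch (editorial note, not generated code): concatenate two tensors
+ // along axis 1 with ConcatV2. If `a` is hypothetically shaped [2, 3] and `b` is
+ // [2, 5], the result is [2, 8]; the axis is passed as a scalar int32 tensor
+ // after the list of values.
+ //
+ //   Tensor a    = ...;                             // shape [2, 3]
+ //   Tensor b    = ...;                             // shape [2, 5]
+ //   Tensor axis = ...;                             // scalar int32 tensor, value 1
+ //   Tensor c    = concat_v2(new[] { a, b }, axis); // shape [2, 8]
+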
+ ///
+ /// Creates a dataset that concatenates input_dataset with another_dataset.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConcatenateDataset'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor concatenate_dataset (Tensor input_dataset, Tensor another_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ConcatenateDataset")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input_dataset"] = input_dataset;
+ dict["another_dataset"] = another_dataset;
+ dict["output_types"] = output_types;
+ dict["output_shapes"] = output_shapes;
+ var op = _op_def_lib._apply_op_helper("ConcatenateDataset", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// A conditional accumulator for aggregating gradients.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConditionalAccumulator'.
+ ///
+ ///
+ /// Optional argument
+ /// The type of the value being accumulated.
+ ///
+ ///
+ /// Optional argument
+ /// The shape of the values, can be [], in which case shape is unknown.
+ ///
+ ///
+ /// If non-empty, this accumulator is placed in the given container.
+ /// Otherwise, a default container is used.
+ ///
+ ///
+ /// If non-empty, this accumulator will be shared under the
+ /// given name across multiple sessions.
+ ///
+ ///
+ /// The handle to the accumulator.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The accumulator accepts gradients marked with local_step greater than or
+ /// equal to the most recent global_step known to the accumulator. The
+ /// average can be extracted from the accumulator, provided sufficient
+ /// gradients have been accumulated. Extracting the average automatically
+ /// resets the aggregate to 0, and increments the global_step recorded by
+ /// the accumulator.
+ ///
+ public static Tensor conditional_accumulator (TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "ConditionalAccumulator")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["dtype"] = dtype;
+ dict["shape"] = shape;
+ if (container != null)
+ dict["container"] = container;
+ if (shared_name != null)
+ dict["shared_name"] = shared_name;
+ var op = _op_def_lib._apply_op_helper("ConditionalAccumulator", name: name, keywords: dict);
+ return op.output;
+ }
+
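+ // Usage sketch (editorial note, not generated code): a typical accumulator
+ // round-trip with the ops bound in this file. The handle is created once;
+ // workers then apply gradients tagged with their local_step via
+ // accumulator_apply_gradient, and a consumer extracts the average with
+ // AccumulatorTakeGradient once enough gradients have arrived. `local_step` and
+ // `grad` are hypothetical tensors built elsewhere.
+ //
+ //   Tensor handle = conditional_accumulator(TF_DataType.TF_FLOAT,
+ //                                           new TensorShape(10, 10),
+ //                                           shared_name: "acc0");
+ //   Operation apply = accumulator_apply_gradient(handle, local_step, grad);
+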
+ ///
+ /// An op that sets up the centralized structures for a distributed TPU system.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConfigureDistributedTPU'.
+ ///
+ ///
+ /// Reserved. Do not use.
+ ///
+ ///
+ /// Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
+ /// describes the embedding lookups of the program.
+ ///
+ ///
+ /// Reserved. Do not use.
+ ///
+ ///
+ /// A serialized tensorflow.tpu.TopologyProto that describes the TPU
+ /// topology.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor configure_distributed_t_p_u (string embedding_config = null, string tpu_embedding_config = null, bool? is_global_init = null, string name = "ConfigureDistributedTPU")
+ {
+ var dict = new Dictionary<string, object>();
+ if (embedding_config != null)
+ dict["embedding_config"] = embedding_config;
+ if (tpu_embedding_config != null)
+ dict["tpu_embedding_config"] = tpu_embedding_config;
+ if (is_global_init.HasValue)
+ dict["is_global_init"] = is_global_init.Value;
+ var op = _op_def_lib._apply_op_helper("ConfigureDistributedTPU", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns the complex conjugate of a complex number.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conj'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor input of complex numbers, this operation returns a tensor of
+ /// complex numbers that are the complex conjugate of each element in input. The
+ /// complex numbers in input must be of the form \\(a + bj\\), where *a* is the
+ /// real part and *b* is the imaginary part.
+ ///
+ /// The complex conjugate returned by this operation is of the form \\(a - bj\\).
+ ///
+ /// For example:
+ ///
+ ///
+ /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+ /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
+ ///
+ ///
+ public static Tensor conj (Tensor input, string name = "Conj")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ var op = _op_def_lib._apply_op_helper("Conj", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Shuffle dimensions of x according to a permutation and conjugate the result.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConjugateTranspose'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The output y has the same rank as x. The shapes of x and y satisfy:
+ /// y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]
+ /// y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])
+ ///
+ public static Tensor conjugate_transpose (Tensor x, Tensor perm, string name = "ConjugateTranspose")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["x"] = x;
+ dict["perm"] = perm;
+ var op = _op_def_lib._apply_op_helper("ConjugateTranspose", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns a constant tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Const'.
+ ///
+ ///
+ /// Optional argument
+ /// Attr value is the tensor to return.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor constant (Tensor value, TF_DataType dtype, string name = "Const")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["value"] = value;
+ dict["dtype"] = dtype;
+ var op = _op_def_lib._apply_op_helper("Const", name: name, keywords: dict);
+ return op.output;
+ }
+
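+ // Usage sketch (editorial note, not generated code): the Hermitian adjoint of a
+ // complex matrix via the ConjugateTranspose op above. `z` is a hypothetical
+ // [m, n] complex tensor and `perm` an int32 tensor holding [1, 0]; the result
+ // is [n, m] with every element conjugated, i.e. y[i, j] == conj(z[j, i]).
+ //
+ //   Tensor z    = ...;                             // shape [m, n], complex64
+ //   Tensor perm = ...;                             // int32 tensor, value [1, 0]
+ //   Tensor zH   = conjugate_transpose(z, perm);    // shape [n, m]
+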
+ ///
+ /// This op consumes a lock created by MutexLock.
+ ///
+ ///
+ /// A tensor returned by MutexLock.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConsumeMutexLock'.
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// This op exists to consume a tensor created by MutexLock (other than
+ /// direct control dependencies). It should be the only op that consumes the tensor,
+ /// and will raise an error if it is not. Its only purpose is to keep the
+ /// mutex lock tensor alive until it is consumed by this op.
+ ///
+ /// **NOTE**: This operation must run on the same device as its input. This may
+ /// be enforced via the colocate_with mechanism.
+ ///
+ public static Operation consume_mutex_lock (Tensor mutex_lock, string name = "ConsumeMutexLock")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["mutex_lock"] = mutex_lock;
+ var op = _op_def_lib._apply_op_helper("ConsumeMutexLock", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Does nothing. Serves as a control trigger for scheduling.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ControlTrigger'.
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// Only useful as a placeholder for control edges.
+ ///
+ public static Operation control_trigger (string name = "ControlTrigger")
+ {
+ var dict = new Dictionary<string, object>();
+ var op = _op_def_lib._apply_op_helper("ControlTrigger", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Computes a 2-D convolution given 4-D input and filter tensors.
+ ///
+ ///
+ /// A 4-D tensor. The dimension order is interpreted according to the value
+ /// of data_format, see below for details.
+ ///
+ ///
+ /// A 4-D tensor of shape
+ /// [filter_height, filter_width, in_channels, out_channels]
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv2D'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 4. The stride of the sliding window for each
+ /// dimension of input. The dimension order is determined by the value of
+ /// data_format, see below for details.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, height, width, channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, channels, height, width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each
+ /// filter element on that dimension. The dimension order is determined by the
+ /// value of data_format, see above for details. Dilations in the batch and
+ /// depth dimensions must be 1.
+ ///
+ ///
+ /// A 4-D tensor. The dimension order is determined by the value of
+ /// data_format, see below for details.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given an input tensor of shape [batch, in_height, in_width, in_channels]
+ /// and a filter / kernel tensor of shape
+ /// [filter_height, filter_width, in_channels, out_channels], this op
+ /// performs the following:
+ ///
+ /// 1. Flattens the filter to a 2-D matrix with shape
+ /// [filter_height * filter_width * in_channels, output_channels].
+ /// 2. Extracts image patches from the input tensor to form a *virtual*
+ /// tensor of shape [batch, out_height, out_width,
+ /// filter_height * filter_width * in_channels].
+ /// 3. For each patch, right-multiplies the filter matrix and the image patch
+ /// vector.
+ ///
+ /// In detail, with the default NHWC format,
+ ///
+ /// output[b, i, j, k] =
+ /// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
+ /// filter[di, dj, q, k]
+ ///
+ /// Must have strides[0] = strides[3] = 1. For the most common case of the same
+ /// horizontal and vertical strides, strides = [1, stride, stride, 1].
+ ///
+ public static Tensor conv2d (Tensor input, Tensor filter, int[] strides, string padding, bool? use_cudnn_on_gpu = null, string data_format = null, int[] dilations = null, string name = "Conv2D")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (use_cudnn_on_gpu.HasValue)
+ dict["use_cudnn_on_gpu"] = use_cudnn_on_gpu.Value;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv2D", name: name, keywords: dict);
+ return op.output;
+ }
+
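+ // Usage sketch (editorial note, not generated code): a stride-2 SAME
+ // convolution in the default NHWC layout. With a hypothetical input of shape
+ // [8, 32, 32, 3] and a 5x5 filter mapping 3 -> 16 channels, the output shape is
+ // [8, 16, 16, 16], since SAME padding gives ceil(32 / 2) = 16 per spatial dim.
+ //
+ //   Tensor images = ...;                           // [8, 32, 32, 3]
+ //   Tensor kernel = ...;                           // [5, 5, 3, 16]
+ //   Tensor y = conv2d(images, kernel,
+ //                     new[] { 1, 2, 2, 1 },        // strides: batch/depth must be 1
+ //                     "SAME");
+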
+ ///
+ /// Computes the gradients of convolution with respect to the filter.
+ ///
+ ///
+ /// 4-D with shape [batch, in_height, in_width, in_channels].
+ ///
+ ///
+ /// An integer vector representing the tensor shape of filter,
+ /// where filter is a 4-D
+ /// [filter_height, filter_width, in_channels, out_channels] tensor.
+ ///
+ ///
+ /// 4-D with shape [batch, out_height, out_width, out_channels].
+ /// Gradients w.r.t. the output of the convolution.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv2DBackpropFilter'.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the input
+ /// of the convolution. Must be in the same order as the dimension specified with
+ /// format.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, in_height, in_width, in_channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, in_channels, in_height, in_width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// data_format, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
+ ///
+ ///
+ /// 4-D with shape
+ /// [filter_height, filter_width, in_channels, out_channels]. Gradient w.r.t.
+ /// the filter input of the convolution.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv2d_backprop_filter (Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, bool? use_cudnn_on_gpu = null, string data_format = null, int[] dilations = null, string name = "Conv2DBackpropFilter")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter_sizes"] = filter_sizes;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (use_cudnn_on_gpu.HasValue)
+ dict["use_cudnn_on_gpu"] = use_cudnn_on_gpu.Value;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of convolution with respect to the input.
+ ///
+ ///
+ /// An integer vector representing the shape of input,
+ /// where input is a 4-D [batch, height, width, channels] tensor.
+ ///
+ ///
+ /// 4-D with shape
+ /// [filter_height, filter_width, in_channels, out_channels].
+ ///
+ ///
+ /// 4-D with shape [batch, out_height, out_width, out_channels].
+ /// Gradients w.r.t. the output of the convolution.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv2DBackpropInput'.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the input
+ /// of the convolution. Must be in the same order as the dimension specified with
+ /// format.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, in_height, in_width, in_channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, in_channels, in_height, in_width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// data_format, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
+ ///
+ ///
+ /// 4-D with shape [batch, in_height, in_width, in_channels]. Gradient
+ /// w.r.t. the input of the convolution.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv2d_backprop_input (Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, bool? use_cudnn_on_gpu = null, string data_format = null, int[] dilations = null, string name = "Conv2DBackpropInput")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input_sizes"] = input_sizes;
+ dict["filter"] = filter;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (use_cudnn_on_gpu.HasValue)
+ dict["use_cudnn_on_gpu"] = use_cudnn_on_gpu.Value;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes a 3-D convolution given 5-D input and filter tensors.
+ ///
+ ///
+ /// Shape [batch, in_depth, in_height, in_width, in_channels].
+ ///
+ ///
+ /// Shape [filter_depth, filter_height, filter_width, in_channels,
+ /// out_channels]. in_channels must match between input and filter.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv3D'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 5. The stride of the sliding window for each
+ /// dimension of input. Must have strides[0] = strides[4] = 1.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// The data format of the input and output data. With the
+ /// default format "NDHWC", the data is stored in the order of:
+ /// [batch, in_depth, in_height, in_width, in_channels].
+ /// Alternatively, the format could be "NCDHW", the data storage order is:
+ /// [batch, in_channels, in_depth, in_height, in_width].
+ ///
+ ///
+ /// 1-D tensor of length 5. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each
+ /// filter element on that dimension. The dimension order is determined by the
+ /// value of data_format, see above for details. Dilations in the batch and
+ /// depth dimensions must be 1.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// In signal processing, cross-correlation is a measure of similarity of
+ /// two waveforms as a function of a time-lag applied to one of them. This
+ /// is also known as a sliding dot product or sliding inner-product.
+ ///
+ /// Our Conv3D implements a form of cross-correlation.
+ ///
+ public static Tensor conv3d (Tensor input, Tensor filter, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "Conv3D")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv3D", name: name, keywords: dict);
+ return op.output;
+ }
+
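+ // Usage sketch (editorial note, not generated code): a 3-D convolution over a
+ // hypothetical volumetric batch in the default NDHWC layout. Input is
+ // [2, 16, 64, 64, 1], a 3x3x3 filter maps 1 -> 8 channels, and with unit
+ // strides plus SAME padding the output is [2, 16, 64, 64, 8]. Note that strides
+ // has length 5, with strides[0] = strides[4] = 1 as required.
+ //
+ //   Tensor volume = ...;                           // [2, 16, 64, 64, 1]
+ //   Tensor kernel = ...;                           // [3, 3, 3, 1, 8]
+ //   Tensor y = conv3d(volume, kernel,
+ //                     new[] { 1, 1, 1, 1, 1 },
+ //                     "SAME");
+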
+ ///
+ /// Computes the gradients of 3-D convolution with respect to the filter.
+ ///
+ ///
+ /// Shape [batch, depth, rows, cols, in_channels].
+ ///
+ ///
+ /// Shape [depth, rows, cols, in_channels, out_channels].
+ /// in_channels must match between input and filter.
+ ///
+ ///
+ /// Backprop signal of shape [batch, out_depth, out_rows, out_cols,
+ /// out_channels].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv3DBackpropFilter'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 5. The stride of the sliding window for each
+ /// dimension of input. Must have strides[0] = strides[4] = 1.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv3d_backprop_filter (Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string name = "Conv3DBackpropFilter")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv3DBackpropFilter", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of 3-D convolution with respect to the filter.
+ ///
+ ///
+ /// Shape [batch, depth, rows, cols, in_channels].
+ ///
+ ///
+ /// An integer vector representing the tensor shape of filter,
+ /// where filter is a 5-D
+ /// [filter_depth, filter_height, filter_width, in_channels, out_channels]
+ /// tensor.
+ ///
+ ///
+ /// Backprop signal of shape [batch, out_depth, out_rows, out_cols,
+ /// out_channels].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv3DBackpropFilterV2'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 5. The stride of the sliding window for each
+ /// dimension of input. Must have strides[0] = strides[4] = 1.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// The data format of the input and output data. With the
+ /// default format "NDHWC", the data is stored in the order of:
+ /// [batch, in_depth, in_height, in_width, in_channels].
+ /// Alternatively, the format could be "NCDHW", the data storage order is:
+ /// [batch, in_channels, in_depth, in_height, in_width].
+ ///
+ ///
+ /// 1-D tensor of length 5. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each
+ /// filter element on that dimension. The dimension order is determined by the
+ /// value of data_format, see above for details. Dilations in the batch and
+ /// depth dimensions must be 1.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv3d_backprop_filter_v2 (Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "Conv3DBackpropFilterV2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter_sizes"] = filter_sizes;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv3DBackpropFilterV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of 3-D convolution with respect to the input.
+ ///
+ ///
+ /// Shape [batch, depth, rows, cols, in_channels].
+ ///
+ ///
+ /// Shape [depth, rows, cols, in_channels, out_channels].
+ /// in_channels must match between input and filter.
+ ///
+ ///
+ /// Backprop signal of shape [batch, out_depth, out_rows, out_cols,
+ /// out_channels].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv3DBackpropInput'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 5. The stride of the sliding window for each
+ /// dimension of input. Must have strides[0] = strides[4] = 1.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv3d_backprop_input (Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string name = "Conv3DBackpropInput")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv3DBackpropInput", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of 3-D convolution with respect to the input.
+ ///
+ ///
+ /// An integer vector representing the tensor shape of input,
+ /// where input is a 5-D
+ /// [batch, depth, rows, cols, in_channels] tensor.
+ ///
+ ///
+ /// Shape [depth, rows, cols, in_channels, out_channels].
+ /// in_channels must match between input and filter.
+ ///
+ ///
+ /// Backprop signal of shape [batch, out_depth, out_rows, out_cols,
+ /// out_channels].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Conv3DBackpropInputV2'.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D tensor of length 5. The stride of the sliding window for each
+ /// dimension of input. Must have strides[0] = strides[4] = 1.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// The data format of the input and output data. With the
+ /// default format "NDHWC", the data is stored in the order of:
+ /// [batch, in_depth, in_height, in_width, in_channels].
+ /// Alternatively, the format could be "NCDHW", the data storage order is:
+ /// [batch, in_channels, in_depth, in_height, in_width].
+ ///
+ ///
+ /// 1-D tensor of length 5. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each
+ /// filter element on that dimension. The dimension order is determined by the
+ /// value of data_format, see above for details. Dilations in the batch and
+ /// depth dimensions must be 1.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor conv3d_backprop_input_v2 (Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "Conv3DBackpropInputV2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input_sizes"] = input_sizes;
+ dict["filter"] = filter;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("Conv3DBackpropInputV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Copy Op.
+ ///
+ ///
+ /// Input tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Copy'.
+ ///
+ ///
+ /// The name of the input tensor.
+ ///
+ ///
+ /// A list of debug op spec (op, url, gated_grpc) for attached debug
+ /// ops. Each element of the list has the format
+ /// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
+ /// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
+ /// "DebugIdentity;file:///tmp/tfdbg_1;0".
+ ///
+ ///
+ /// Output tensor, deep-copied from input.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
+ /// device on which the tensor is allocated.
+ /// N.B.: If all downstream attached debug ops are disabled given the current
+ /// gRPC gating status, the output will simply forward the input tensor without
+ /// deep-copying. See the documentation of Debug* ops for more details.
+ ///
+ /// Unlike the CopyHost Op, this op does not have HostMemory constraint on its
+ /// input or output.
+ ///
+ public static Tensor copy (Tensor input, string tensor_name = null, string[] debug_ops_spec = null, string name = "Copy")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ if (tensor_name != null)
+ dict["tensor_name"] = tensor_name;
+ if (debug_ops_spec != null)
+ dict["debug_ops_spec"] = debug_ops_spec;
+ var op = _op_def_lib._apply_op_helper("Copy", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Copy Host Op.
+ ///
+ ///
+ /// Input tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CopyHost'.
+ ///
+ ///
+ /// The name of the input tensor.
+ ///
+ ///
+ /// A list of debug op spec (op, url, gated_grpc) for attached debug
+ /// ops. Each element of the list has the format
+ /// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
+ /// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
+ /// "DebugIdentity;file:///tmp/tfdbg_1;0".
+ ///
+ ///
+ /// Output tensor, deep-copied from input.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Performs CPU-to-CPU deep-copying of tensor.
+ /// N.B.: If all downstream attached debug ops are disabled given the current
+ /// gRPC gating status, the output will simply forward the input tensor without
+ /// deep-copying. See the documentation of Debug* ops for more details.
+ ///
+ /// Unlike the Copy Op, this op has HostMemory constraint on its input or output.
+ ///
+ public static Tensor copy_host (Tensor input, string tensor_name = null, string[] debug_ops_spec = null, string name = "CopyHost")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ if (tensor_name != null)
+ dict["tensor_name"] = tensor_name;
+ if (debug_ops_spec != null)
+ dict["debug_ops_spec"] = debug_ops_spec;
+ var op = _op_def_lib._apply_op_helper("CopyHost", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes cos of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cos'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor cos (Tensor x, string name = "Cos")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Cos", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes hyperbolic cosine of x element-wise.
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cosh'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor cosh (Tensor x, string name = "Cosh")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Cosh", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Increments 'ref' until it reaches 'limit'.
+ ///
+ ///
+ /// Should be from a scalar Variable node.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CountUpTo'.
+ ///
+ ///
+ /// Optional argument
+ /// If incrementing ref would bring it above limit, instead generates an
+ /// 'OutOfRange' error.
+ ///
+ ///
+ /// A copy of the input before increment. If nothing else modifies the
+ /// input, the values produced will all be distinct.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor count_up_to (Tensor reference, int limit, string name = "CountUpTo")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["ref"] = reference;
+ dict["limit"] = limit;
+ var op = _op_def_lib._apply_op_helper("CountUpTo", name: name, keywords: dict);
+ return op.output;
+ }
+
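+ // Usage sketch (editorial note, not generated code): a bounded counter built on
+ // the CountUpTo op above. Each evaluation of `next` yields the variable's value
+ // before the increment; once the value would pass the limit, evaluating it
+ // raises an OutOfRange error, a common way to bound iteration in input loops.
+ // `counter` is a hypothetical scalar Variable node created elsewhere.
+ //
+ //   Tensor counter = ...;                          // scalar int Variable node
+ //   Tensor next = count_up_to(counter, limit: 100);
+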
+ ///
+ /// Extracts crops from the input image tensor and resizes them.
+ ///
+ ///
+ /// A 4-D tensor of shape [batch, image_height, image_width, depth].
+ /// Both image_height and image_width need to be positive.
+ ///
+ ///
+ /// A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor
+ /// specifies the coordinates of a box in the box_ind[i] image and is specified
+ /// in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of
+ /// y is mapped to the image coordinate at y * (image_height - 1), so as the
+ /// [0, 1] interval of normalized image height is mapped to
+ /// [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
+ /// which case the sampled crop is an up-down flipped version of the original
+ /// image. The width dimension is treated similarly. Normalized coordinates
+ /// outside the [0, 1] range are allowed, in which case we use
+ /// extrapolation_value to extrapolate the input image values.
+ ///
+ ///
+ /// A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
+ /// The value of box_ind[i] specifies the image that the i-th box refers to.
+ ///
+ ///
+ /// A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All
+ /// cropped image patches are resized to this size. The aspect ratio of the image
+ /// content is not preserved. Both crop_height and crop_width need to be
+ /// positive.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CropAndResize'.
+ ///
+ ///
+ /// A string specifying the sampling method for resizing. It can be either
+ /// "bilinear" or "nearest" and defaults to "bilinear". Currently two sampling
+ /// methods are supported: Bilinear and Nearest Neighbor.
+ ///
+ ///
+ /// Value used for extrapolation, when applicable.
+ ///
+ ///
+ /// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Extracts crops from the input image tensor and resizes them using bilinear
+ /// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
+ /// common output size specified by crop_size. This is more general than the
+ /// crop_to_bounding_box op which extracts a fixed size slice from the input image
+ /// and does not allow resizing or aspect ratio change.
+ ///
+ /// Returns a tensor with crops from the input image at positions defined at the
+ /// bounding box locations in boxes. The cropped boxes are all resized (with
+ /// bilinear or nearest neighbor interpolation) to a fixed
+ /// size = [crop_height, crop_width]. The result is a 4-D tensor
+ /// [num_boxes, crop_height, crop_width, depth]. The resizing is corner aligned.
+ /// In particular, if boxes = [[0, 0, 1, 1]], the method will give identical
+ /// results to using tf.image.resize_bilinear() or
+ /// tf.image.resize_nearest_neighbor() (depending on the method argument) with
+ /// align_corners=True.
+ ///
+ public static Tensor crop_and_resize (Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method = null, float? extrapolation_value = null, string name = "CropAndResize")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["image"] = image;
+ dict["boxes"] = boxes;
+ dict["box_ind"] = box_ind;
+ dict["crop_size"] = crop_size;
+ if (method != null)
+ dict["method"] = method;
+ if (extrapolation_value.HasValue)
+ dict["extrapolation_value"] = extrapolation_value.Value;
+ var op = _op_def_lib._apply_op_helper("CropAndResize", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
+ ///
+ ///
+ /// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+ ///
+ ///
+ /// A 4-D tensor of shape [batch, image_height, image_width, depth].
+ /// Both image_height and image_width need to be positive.
+ ///
+ ///
+ /// A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor
+ /// specifies the coordinates of a box in the box_ind[i] image and is specified
+ /// in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of
+ /// y is mapped to the image coordinate at y * (image_height - 1), so as the
+ /// [0, 1] interval of normalized image height is mapped to
+ /// [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
+ /// which case the sampled crop is an up-down flipped version of the original
+ /// image. The width dimension is treated similarly. Normalized coordinates
+ /// outside the [0, 1] range are allowed, in which case we use
+ /// extrapolation_value to extrapolate the input image values.
+ ///
+ ///
+ /// A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
+ /// The value of box_ind[i] specifies the image that the i-th box refers to.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CropAndResizeGradBoxes'.
+ ///
+ ///
+ /// A string specifying the interpolation method. Only 'bilinear' is
+ /// supported for now.
+ ///
+ ///
+ /// A 2-D tensor of shape [num_boxes, 4].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor crop_and_resize_grad_boxes (Tensor grads, Tensor image, Tensor boxes, Tensor box_ind, string method = null, string name = "CropAndResizeGradBoxes")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["grads"] = grads;
+ dict["image"] = image;
+ dict["boxes"] = boxes;
+ dict["box_ind"] = box_ind;
+ if (method != null)
+ dict["method"] = method;
+ var op = _op_def_lib._apply_op_helper("CropAndResizeGradBoxes", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradient of the crop_and_resize op wrt the input image tensor.
+ ///
+ ///
+ /// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+ ///
+ ///
+ /// A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor
+ /// specifies the coordinates of a box in the box_ind[i] image and is specified
+ /// in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of
+ /// y is mapped to the image coordinate at y * (image_height - 1), so as the
+ /// [0, 1] interval of normalized image height is mapped to
+ /// [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
+ /// which case the sampled crop is an up-down flipped version of the original
+ /// image. The width dimension is treated similarly. Normalized coordinates
+ /// outside the [0, 1] range are allowed, in which case we use
+ /// extrapolation_value to extrapolate the input image values.
+ ///
+ ///
+ /// A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
+ /// The value of box_ind[i] specifies the image that the i-th box refers to.
+ ///
+ ///
+ /// A 1-D tensor with value [batch, image_height, image_width, depth]
+ /// containing the original image size. Both image_height and image_width need
+ /// to be positive.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CropAndResizeGradImage'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// A string specifying the interpolation method. Only 'bilinear' is
+ /// supported for now.
+ ///
+ ///
+ /// A 4-D tensor of shape [batch, image_height, image_width, depth].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor crop_and_resize_grad_image (Tensor grads, Tensor boxes, Tensor box_ind, Tensor image_size, TF_DataType T, string method = null, string name = "CropAndResizeGradImage")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["grads"] = grads;
+ dict["boxes"] = boxes;
+ dict["box_ind"] = box_ind;
+ dict["image_size"] = image_size;
+ dict["T"] = T;
+ if (method != null)
+ dict["method"] = method;
+ var op = _op_def_lib._apply_op_helper("CropAndResizeGradImage", name: name, keywords: dict);
+ return op.output;
+ }
+
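+ // Usage sketch (editorial note, not generated code): crop one normalized box
+ // from each image in a batch with the CropAndResize op above and resize the
+ // crops to 24x24. Boxes use [y1, x1, y2, x2] coordinates in [0, 1]; box_ind
+ // picks which image each box comes from. All input tensors are hypothetical.
+ //
+ //   Tensor images    = ...;                        // [batch, H, W, 3]
+ //   Tensor boxes     = ...;                        // [num_boxes, 4], e.g. [[0.1, 0.1, 0.9, 0.9]]
+ //   Tensor box_ind   = ...;                        // [num_boxes] int32 in [0, batch)
+ //   Tensor crop_size = ...;                        // [2] int32, value [24, 24]
+ //   Tensor crops = crop_and_resize(images, boxes, box_ind, crop_size,
+ //                                  method: "bilinear");
+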
+ ///
+ /// Compute the pairwise cross product.
+ ///
+ ///
+ /// A tensor containing 3-element vectors.
+ ///
+ ///
+ /// Another tensor, of same type and shape as a.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cross'.
+ ///
+ ///
+ /// Pairwise cross product of the vectors in a and b.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// a and b must be the same shape; they can either be simple 3-element vectors,
+ /// or any shape where the innermost dimension is 3. In the latter case, each pair
+ /// of corresponding 3-element vectors is cross-multiplied independently.
+ ///
+ public static Tensor cross (Tensor a, Tensor b, string name = "Cross")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["a"] = a;
+ dict["b"] = b;
+ var op = _op_def_lib._apply_op_helper("Cross", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// An Op to sum inputs across replicated TPU instances.
+ ///
+ ///
+ /// The local input to the sum.
+ ///
+ ///
+ /// An int32 tensor with shape
+ /// [num_groups, num_replicas_per_group]. group_assignment[i] represents the
+ /// replica ids in the ith subgroup.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CrossReplicaSum'.
+ ///
+ ///
+ /// The sum of all the distributed inputs.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Each instance supplies its own input. If group_assignment is empty, the output of
+ /// each is the sum of all the inputs, otherwise the output of each is the sum of
+ /// the inputs belonging to the same group.
+ ///
+ /// For example, suppose there are 8 TPU instances: [A, B, C, D, E, F, G, H].
+ /// Passing group_assignment=[[0,2,4,6],[1,3,5,7]] sets A, C, E, G as group 0,
+ /// and B, D, F, H as group 1. Thus we get the outputs:
+ /// [A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H].
+ ///
+ public static Tensor cross_replica_sum (Tensor input, Tensor group_assignment, string name = "CrossReplicaSum")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["group_assignment"] = group_assignment;
+ var op = _op_def_lib._apply_op_helper("CrossReplicaSum", name: name, keywords: dict);
+ return op.output;
+ }
+
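+ // Usage sketch (editorial note, not generated code): pairwise 3-D cross
+ // products with the Cross op above. With hypothetical a = [[1, 0, 0]] and
+ // b = [[0, 1, 0]] the result is [[0, 0, 1]]; any leading shape works as long as
+ // the innermost dimension is 3.
+ //
+ //   Tensor a = ...;                                // [..., 3]
+ //   Tensor b = ...;                                // [..., 3]
+ //   Tensor n = cross(a, b);                        // [..., 3]
+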
+ ///
+ /// An RNN backed by cuDNN.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNN'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output :
+ /// output_h :
+ /// output_c :
+ /// reserve_space :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Computes the RNN from the input and initial states, with respect to the params
+ /// buffer.
+ ///
+ /// rnn_mode: Indicates the type of the RNN model.
+ /// input_mode: Indicates whether there is a linear projection between the input and
+ /// the actual computation before the first layer. 'skip_input' is only allowed
+ /// when input_size == num_units; 'auto_select' implies 'skip_input' when
+ /// input_size == num_units; otherwise, it implies 'linear_input'.
+ /// direction: Indicates whether a bidirectional model will be used. Should be
+ /// "unidirectional" or "bidirectional".
+ /// dropout: Dropout probability. When set to 0., dropout is disabled.
+ /// seed: The 1st part of a seed to initialize dropout.
+ /// seed2: The 2nd part of a seed to initialize dropout.
+ /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+ /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+ /// num_units].
+ /// input_c: For LSTM, a 3-D tensor with the shape of
+ /// [num_layer * dir, batch, num_units]. For other models, it is ignored.
+ /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+ /// The size must be created through CudnnRNNParamsSize, and initialized
+ /// separately. Note that they might not be compatible across different
+ /// generations. So it is a good idea to save and restore them in a way that is
+ /// compatible across different runs.
+ /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+ /// dir * num_units].
+ /// output_h: The same shape as input_h.
+ /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+ /// is_training: Indicates whether this operation is used for inference or
+ /// training.
+ /// reserve_space: An opaque tensor that can be used in backprop calculation. It
+ /// is only produced if is_training is true.
+ ///
+ public static (Tensor output, Tensor output_h, Tensor output_c, Tensor reserve_space) cudnn_r_n_n (Tensor input, Tensor input_h, Tensor input_c, Tensor parameters, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int? seed2 = null, bool? is_training = null, string name = "CudnnRNN")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["input_h"] = input_h;
+ dict["input_c"] = input_c;
+ dict["params"] = parameters;
+ if (rnn_mode != null)
+ dict["rnn_mode"] = rnn_mode;
+ if (input_mode != null)
+ dict["input_mode"] = input_mode;
+ if (direction != null)
+ dict["direction"] = direction;
+ if (dropout.HasValue)
+ dict["dropout"] = dropout.Value;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ if (is_training.HasValue)
+ dict["is_training"] = is_training.Value;
+ var op = _op_def_lib._apply_op_helper("CudnnRNN", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var output_h = op.outputs[_idx++];
+ var output_c = op.outputs[_idx++];
+ var reserve_space = op.outputs[_idx++];
+ return (output, output_h, output_c, reserve_space);
+ }
+
+ ///
+ /// Backprop step of CudnnRNN.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNBackprop'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// input_backprop :
+ /// input_h_backprop :
+ /// input_c_backprop :
+ /// params_backprop :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Compute the backprop of both data and weights in an RNN.
+ ///
+ /// rnn_mode: Indicates the type of the RNN model.
+ /// input_mode: Indicates whether there is a linear projection between the input and
+ /// the actual computation before the first layer. 'skip_input' is only allowed
+ /// when input_size == num_units; 'auto_select' implies 'skip_input' when
+ /// input_size == num_units; otherwise, it implies 'linear_input'.
+ /// direction: Indicates whether a bidirectional model will be used. Should be
+ /// "unidirectional" or "bidirectional".
+ /// dropout: Dropout probability. When set to 0., dropout is disabled.
+ /// seed: The 1st part of a seed to initialize dropout.
+ /// seed2: The 2nd part of a seed to initialize dropout.
+ /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+ /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+ /// num_units].
+ /// input_c: For LSTM, a 3-D tensor with the shape of
+ /// [num_layer * dir, batch, num_units]. For other models, it is ignored.
+ /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+ /// The size must be created through CudnnRNNParamsSize, and initialized
+ /// separately. Note that they might not be compatible across different
+ /// generations. So it is a good idea to save and restore them in a way that is
+ /// compatible across different runs.
+ /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+ /// dir * num_units].
+ /// output_h: The same shape as input_h.
+ /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+ /// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+ /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+ /// pass.
+ /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+ /// pass.
+ /// reserve_space: The same reserve_space produced in the forward operation.
+ /// input_backprop: The backprop to input in the forward pass. Has the same shape
+ /// as input.
+ /// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+ /// shape as input_h.
+ /// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+ /// shape as input_c.
+ /// params_backprop: The backprop to the params buffer in the forward pass. Has the
+ /// same shape as params.
+ ///
+ public static (Tensor input_backprop, Tensor input_h_backprop, Tensor input_c_backprop, Tensor params_backprop) cudnn_r_n_n_backprop (Tensor input, Tensor input_h, Tensor input_c, Tensor parameters, Tensor output, Tensor output_h, Tensor output_c, Tensor output_backprop, Tensor output_h_backprop, Tensor output_c_backprop, Tensor reserve_space, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int? seed2 = null, string name = "CudnnRNNBackprop")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["input_h"] = input_h;
+ dict["input_c"] = input_c;
+ dict["params"] = parameters;
+ dict["output"] = output;
+ dict["output_h"] = output_h;
+ dict["output_c"] = output_c;
+ dict["output_backprop"] = output_backprop;
+ dict["output_h_backprop"] = output_h_backprop;
+ dict["output_c_backprop"] = output_c_backprop;
+ dict["reserve_space"] = reserve_space;
+ if (rnn_mode != null)
+ dict["rnn_mode"] = rnn_mode;
+ if (input_mode != null)
+ dict["input_mode"] = input_mode;
+ if (direction != null)
+ dict["direction"] = direction;
+ if (dropout.HasValue)
+ dict["dropout"] = dropout.Value;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("CudnnRNNBackprop", name: name, keywords: dict);
+ int _idx = 0;
+ var input_backprop = op.outputs[_idx++];
+ var input_h_backprop = op.outputs[_idx++];
+ var input_c_backprop = op.outputs[_idx++];
+ var params_backprop = op.outputs[_idx++];
+ return (input_backprop, input_h_backprop, input_c_backprop, params_backprop);
+ }
+
+ ///
+ /// Backprop step of CudnnRNN.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNBackpropV2'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// input_backprop :
+ /// input_h_backprop :
+ /// input_c_backprop :
+ /// params_backprop :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Compute the backprop of both data and weights in an RNN. Takes an extra
+ /// "host_reserved" input compared to CudnnRNNBackprop, which is used to determine RNN
+ /// cudnnRNNAlgo_t and cudnnMathType_t.
+ ///
+ /// rnn_mode: Indicates the type of the RNN model.
+ /// input_mode: Indicates whether there is a linear projection between the input and
+ /// the actual computation before the first layer. 'skip_input' is only allowed
+ /// when input_size == num_units; 'auto_select' implies 'skip_input' when
+ /// input_size == num_units; otherwise, it implies 'linear_input'.
+ /// direction: Indicates whether a bidirectional model will be used. Should be
+ /// "unidirectional" or "bidirectional".
+ /// dropout: Dropout probability. When set to 0., dropout is disabled.
+ /// seed: The 1st part of a seed to initialize dropout.
+ /// seed2: The 2nd part of a seed to initialize dropout.
+ /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+ /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+ /// num_units].
+ /// input_c: For LSTM, a 3-D tensor with the shape of
+ /// [num_layer * dir, batch, num_units]. For other models, it is ignored.
+ /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+ /// The size must be created through CudnnRNNParamsSize, and initialized
+ /// separately. Note that they might not be compatible across different
+ /// generations. So it is a good idea to save and restore them in a way that is
+ /// compatible across different runs.
+ /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+ /// dir * num_units].
+ /// output_h: The same shape as input_h.
+ /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+ /// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+ /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+ /// pass.
+ /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+ /// pass.
+ /// reserve_space: The same reserve_space produced in the forward operation.
+ /// host_reserved: The same host_reserved produced in the forward operation.
+ /// input_backprop: The backprop to input in the forward pass. Has the same shape
+ /// as input.
+ /// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+ /// shape as input_h.
+ /// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+ /// shape as input_c.
+ /// params_backprop: The backprop to the params buffer in the forward pass. Has the
+ /// same shape as params.
+ ///
+ public static (Tensor input_backprop, Tensor input_h_backprop, Tensor input_c_backprop, Tensor params_backprop) cudnn_r_n_n_backprop_v2 (Tensor input, Tensor input_h, Tensor input_c, Tensor parameters, Tensor output, Tensor output_h, Tensor output_c, Tensor output_backprop, Tensor output_h_backprop, Tensor output_c_backprop, Tensor reserve_space, Tensor host_reserved, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int? seed2 = null, string name = "CudnnRNNBackpropV2")
seed2 = null, string name = "CudnnRNNBackpropV2") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_h"] = input_h; + dict["input_c"] = input_c; + dict["params"] = parameters; + dict["output"] = output; + dict["output_h"] = output_h; + dict["output_c"] = output_c; + dict["output_backprop"] = output_backprop; + dict["output_h_backprop"] = output_h_backprop; + dict["output_c_backprop"] = output_c_backprop; + dict["reserve_space"] = reserve_space; + dict["host_reserved"] = host_reserved; + if (rnn_mode != null) + dict["rnn_mode"] = rnn_mode; + if (input_mode != null) + dict["input_mode"] = input_mode; + if (direction != null) + dict["direction"] = direction; + if (dropout.HasValue) + dict["dropout"] = dropout.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("CudnnRNNBackpropV2", name: name, keywords: dict); + int _idx = 0; + var input_backprop = op.outputs[_idx++]; + var input_h_backprop = op.outputs[_idx++]; + var input_c_backprop = op.outputs[_idx++]; + var params_backprop = op.outputs[_idx++]; + return (input_backprop, input_h_backprop, input_c_backprop, params_backprop); + } + + /// + /// Converts CudnnRNN params from canonical form to usable form. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNCanonicalToParams'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Writes a set of weights into the opaque params buffer so they can be used in + /// upcoming training or inferences. + /// + /// Note that the params buffer may not be compatible across different GPUs. So any + /// save and restoration should be converted to and from the canonical weights and + /// biases. + /// + /// num_layers: Specifies the number of layers in the RNN model. + /// num_units: Specifies the size of the hidden state. + /// input_size: Specifies the size of the input state. + /// weights: the canonical form of weights that can be used for saving + /// and restoration. They are more likely to be compatible across different + /// generations. + /// biases: the canonical form of biases that can be used for saving + /// and restoration. They are more likely to be compatible across different + /// generations. + /// num_params: number of parameter sets for all layers. + /// Each layer may contain multiple parameter sets, with each set consisting of + /// a weight matrix and a bias vector. + /// rnn_mode: Indicates the type of the RNN model. + /// input_mode: Indicates whether there is a linear projection between the input and + /// the actual computation before the first layer. 'skip_input' is only allowed + /// when input_size == num_units; 'auto_select' implies 'skip_input' when + /// input_size == num_units; otherwise, it implies 'linear_input'. + /// direction: Indicates whether a bidirectional model will be used. + /// dir = (direction == bidirectional) ? 2 : 1 + /// dropout: dropout probability. When set to 0., dropout is disabled. + /// seed: the 1st part of a seed to initialize dropout. + /// seed2: the 2nd part of a seed to initialize dropout.
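+ /// + /// A minimal usage sketch (every handle below is a hypothetical placeholder assumed to already exist in the graph): + /// + /// // pack canonical weights/biases back into the opaque cuDNN params buffer + /// Tensor opaqueParams = gen_ops.cudnn_r_n_n_canonical_to_params( + /// num_layers, num_units, input_size, canonicalWeights, canonicalBiases, + /// rnn_mode: "lstm", direction: "unidirectional");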
+ /// + public static Tensor cudnn_r_n_n_canonical_to_params (Tensor num_layers, Tensor num_units, Tensor input_size, Tensor[] weights, Tensor[] biases, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int? seed2 = null, string name = "CudnnRNNCanonicalToParams") + { + var dict = new Dictionary(); + dict["num_layers"] = num_layers; + dict["num_units"] = num_units; + dict["input_size"] = input_size; + dict["weights"] = weights; + dict["biases"] = biases; + if (rnn_mode != null) + dict["rnn_mode"] = rnn_mode; + if (input_mode != null) + dict["input_mode"] = input_mode; + if (direction != null) + dict["direction"] = direction; + if (dropout.HasValue) + dict["dropout"] = dropout.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("CudnnRNNCanonicalToParams", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes size of weights that can be used by a Cudnn RNN model. + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNParamsSize'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Return the params size that can be used by the Cudnn RNN model. Subsequent + /// weight allocation and initialization should use this size. + /// + /// num_layers: Specifies the number of layers in the RNN model. + /// num_units: Specifies the size of the hidden state. + /// input_size: Specifies the size of the input state. + /// rnn_mode: Indicates the type of the RNN model. + /// input_mode: Indicates whether there is a linear projection between the input and + /// the actual computation before the first layer. 'skip_input' is only allowed + /// when input_size == num_units; 'auto_select' implies 'skip_input' when + /// input_size == num_units; otherwise, it implies 'linear_input'. + /// direction: Indicates whether a bidirectional model will be used. + /// dir = (direction == bidirectional) ? 2 : 1 + /// dropout: dropout probability. When set to 0., dropout is disabled. + /// seed: the 1st part of a seed to initialize dropout. + /// seed2: the 2nd part of a seed to initialize dropout. + /// params_size: The size of the params buffer that should be allocated and + /// initialized for this RNN model. Note that this params buffer may not be + /// compatible across GPUs. Please use CudnnRNNParamsWeights and + /// CudnnRNNParamsBiases to save and restore them in a way that is compatible + /// across different runs. + /// + public static Tensor cudnn_r_n_n_params_size (Tensor num_layers, Tensor num_units, Tensor input_size, TF_DataType T, TF_DataType S, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int?
seed2 = null, string name = "CudnnRNNParamsSize") + { + var dict = new Dictionary(); + dict["num_layers"] = num_layers; + dict["num_units"] = num_units; + dict["input_size"] = input_size; + dict["T"] = T; + dict["S"] = S; + if (rnn_mode != null) + dict["rnn_mode"] = rnn_mode; + if (input_mode != null) + dict["input_mode"] = input_mode; + if (direction != null) + dict["direction"] = direction; + if (dropout.HasValue) + dict["dropout"] = dropout.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("CudnnRNNParamsSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Retrieves CudnnRNN params in canonical form. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNParamsToCanonical'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// weights : + /// biases : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Retrieves a set of weights from the opaque params buffer that can be saved and + /// restored in a way compatible with future runs. + /// + /// Note that the params buffer may not be compatible across different GPUs. So any + /// save and restoration should be converted to and from the canonical weights and + /// biases. + /// + /// num_layers: Specifies the number of layers in the RNN model. + /// num_units: Specifies the size of the hidden state. + /// input_size: Specifies the size of the input state. + /// num_params: number of parameter sets for all layers. + /// Each layer may contain multiple parameter sets, with each set consisting of + /// a weight matrix and a bias vector. + /// weights: the canonical form of weights that can be used for saving + /// and restoration. They are more likely to be compatible across different + /// generations. + /// biases: the canonical form of biases that can be used for saving + /// and restoration. They are more likely to be compatible across different + /// generations. + /// rnn_mode: Indicates the type of the RNN model. + /// input_mode: Indicates whether there is a linear projection between the input and + /// the actual computation before the first layer. 'skip_input' is only allowed + /// when input_size == num_units; 'auto_select' implies 'skip_input' when + /// input_size == num_units; otherwise, it implies 'linear_input'. + /// direction: Indicates whether a bidirectional model will be used. + /// dir = (direction == bidirectional) ? 2 : 1 + /// dropout: dropout probability. When set to 0., dropout is disabled. + /// seed: the 1st part of a seed to initialize dropout. + /// seed2: the 2nd part of a seed to initialize dropout. + /// + public static (Tensor[] weights, Tensor[] biases) cudnn_r_n_n_params_to_canonical (Tensor num_layers, Tensor num_units, Tensor input_size, Tensor parameters, int num_params, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int?
seed2 = null, string name = "CudnnRNNParamsToCanonical") + { + var dict = new Dictionary(); + dict["num_layers"] = num_layers; + dict["num_units"] = num_units; + dict["input_size"] = input_size; + dict["params"] = parameters; + dict["num_params"] = num_params; + if (rnn_mode != null) + dict["rnn_mode"] = rnn_mode; + if (input_mode != null) + dict["input_mode"] = input_mode; + if (direction != null) + dict["direction"] = direction; + if (dropout.HasValue) + dict["dropout"] = dropout.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("CudnnRNNParamsToCanonical", name: name, keywords: dict); + int _idx = 0; + var weights = Enumerable.Range(0, op.OutputListLength("weights")).Select(_ => op.outputs[_idx++]).ToArray(); + var biases = Enumerable.Range(0, op.OutputListLength("biases")).Select(_ => op.outputs[_idx++]).ToArray(); + return (weights, biases); + } + + /// + /// An RNN backed by cuDNN. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'CudnnRNNV2'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// output_h : + /// output_c : + /// reserve_space : + /// host_reserved : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Computes the RNN from the input and initial states, with respect to the params + /// buffer. Produces one extra output, "host_reserved", compared to CudnnRNN. + /// + /// rnn_mode: Indicates the type of the RNN model. + /// input_mode: Indicates whether there is a linear projection between the input and + /// the actual computation before the first layer. 'skip_input' is only allowed + /// when input_size == num_units; 'auto_select' implies 'skip_input' when + /// input_size == num_units; otherwise, it implies 'linear_input'. + /// direction: Indicates whether a bidirectional model will be used. Should be + /// "unidirectional" or "bidirectional". + /// dropout: Dropout probability. When set to 0., dropout is disabled. + /// seed: The 1st part of a seed to initialize dropout. + /// seed2: The 2nd part of a seed to initialize dropout. + /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. + /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, + /// num_units]. + /// input_c: For LSTM, a 3-D tensor with the shape of + /// [num_layer * dir, batch, num_units]. For other models, it is ignored. + /// params: A 1-D tensor that contains the weights and biases in an opaque layout. + /// The size must be created through CudnnRNNParamsSize, and initialized + /// separately. Note that they might not be compatible across different + /// generations. So it is a good idea to save and restore + /// output: A 3-D tensor with the shape of [seq_length, batch_size, + /// dir * num_units]. + /// output_h: The same shape as input_h. + /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. + /// is_training: Indicates whether this operation is used for inference or + /// training. + /// reserve_space: An opaque tensor that can be used in backprop calculation. It + /// is only produced if is_training is true. + /// host_reserved: An opaque tensor that can be used in backprop calculation.
It is + /// only produced if is_training is true. It is output on host memory rather than + /// device memory. + /// + public static (Tensor output, Tensor output_h, Tensor output_c, Tensor reserve_space, Tensor host_reserved) cudnn_r_n_n_v2 (Tensor input, Tensor input_h, Tensor input_c, Tensor parameters, string rnn_mode = null, string input_mode = null, string direction = null, float? dropout = null, int? seed = null, int? seed2 = null, bool? is_training = null, string name = "CudnnRNNV2") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_h"] = input_h; + dict["input_c"] = input_c; + dict["params"] = parameters; + if (rnn_mode != null) + dict["rnn_mode"] = rnn_mode; + if (input_mode != null) + dict["input_mode"] = input_mode; + if (direction != null) + dict["direction"] = direction; + if (dropout.HasValue) + dict["dropout"] = dropout.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (is_training.HasValue) + dict["is_training"] = is_training.Value; + var op = _op_def_lib._apply_op_helper("CudnnRNNV2", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var output_h = op.outputs[_idx++]; + var output_c = op.outputs[_idx++]; + var reserve_space = op.outputs[_idx++]; + var host_reserved = op.outputs[_idx++]; + return (output, output_h, output_c, reserve_space, host_reserved); + } + + /// + /// Compute the cumulative product of the tensor x along axis. + /// + /// + /// A Tensor. Must be one of the following types: float32, float64, + /// int64, int32, uint8, uint16, int16, int8, complex64, + /// complex128, qint8, quint8, qint32, half. + /// + /// + /// A Tensor of type int32 (default: 0). Must be in the range + /// [-rank(x), rank(x)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cumprod'. + /// + /// + /// If True, perform exclusive cumprod. + /// + /// + /// A bool (default: False). + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// By default, this op performs an inclusive cumprod, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// + /// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + /// + /// + /// By setting the exclusive kwarg to True, an exclusive cumprod is + /// performed instead: + /// + /// + /// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + /// + /// + /// By setting the reverse kwarg to True, the cumprod is performed in the + /// opposite direction: + /// + /// + /// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + /// + /// + /// This is more efficient than using separate tf.reverse ops. + /// + /// The reverse and exclusive kwargs can also be combined: + /// + /// + /// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + /// + /// + public static Tensor cumprod (Tensor x, Tensor axis, bool? exclusive = null, bool? reverse = null, string name = "Cumprod") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["axis"] = axis; + if (exclusive.HasValue) + dict["exclusive"] = exclusive.Value; + if (reverse.HasValue) + dict["reverse"] = reverse.Value; + var op = _op_def_lib._apply_op_helper("Cumprod", name: name, keywords: dict); + return op.output; + } + + /// + /// Compute the cumulative sum of the tensor x along axis. + /// + /// + /// A Tensor. 
Must be one of the following types: float32, float64, + /// int64, int32, uint8, uint16, int16, int8, complex64, + /// complex128, qint8, quint8, qint32, half. + /// + /// + /// A Tensor of type int32 (default: 0). Must be in the range + /// [-rank(x), rank(x)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Cumsum'. + /// + /// + /// If True, perform exclusive cumsum. + /// + /// + /// A bool (default: False). + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// By default, this op performs an inclusive cumsum, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// + /// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + /// + /// + /// By setting the exclusive kwarg to True, an exclusive cumsum is + /// performed instead: + /// + /// + /// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + /// + /// + /// By setting the reverse kwarg to True, the cumsum is performed in the + /// opposite direction: + /// + /// + /// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + /// + /// + /// This is more efficient than using separate tf.reverse ops. + /// + /// The reverse and exclusive kwargs can also be combined: + /// + /// + /// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + /// + /// + public static Tensor cumsum (Tensor x, Tensor axis, bool? exclusive = null, bool? reverse = null, string name = "Cumsum") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["axis"] = axis; + if (exclusive.HasValue) + dict["exclusive"] = exclusive.Value; + if (reverse.HasValue) + dict["reverse"] = reverse.Value; + var op = _op_def_lib._apply_op_helper("Cumsum", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the dimension index in the destination data format given the one in + /// + /// + /// A Tensor with each element as a dimension index in source data format. + /// Must be in the range [-4, 4). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DataFormatDimMap'. + /// + /// + /// source data format. + /// + /// + /// destination data format. + /// + /// + /// A Tensor with each element as a dimension index in destination data format. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// the source data format. + /// + public static Tensor data_format_dim_map (Tensor x, string src_format = null, string dst_format = null, string name = "DataFormatDimMap") + { + var dict = new Dictionary(); + dict["x"] = x; + if (src_format != null) + dict["src_format"] = src_format; + if (dst_format != null) + dict["dst_format"] = dst_format; + var op = _op_def_lib._apply_op_helper("DataFormatDimMap", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the permuted vector/tensor in the destination data format given the + /// + /// + /// Vector of size 4 or Tensor of shape (4, 2) in source data format. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DataFormatVecPermute'. + /// + /// + /// source data format. + /// + /// + /// destination data format. + /// + /// + /// Vector of size 4 or Tensor of shape (4, 2) in destination data format. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// one in the source data format. + /// + public static Tensor data_format_vec_permute (Tensor x, string src_format = null, string dst_format = null, string name = "DataFormatVecPermute") + { + var dict = new Dictionary(); + dict["x"] = x; + if (src_format != null) + dict["src_format"] = src_format; + if (dst_format != null) + dict["dst_format"] = dst_format; + var op = _op_def_lib._apply_op_helper("DataFormatVecPermute", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a serialized GraphDef representing input_dataset. + /// + /// + /// A variant tensor representing the dataset to return the graph representation for. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DatasetToGraph'. + /// + /// + /// The graph representation of the dataset (as serialized GraphDef). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Returns a graph representation for input_dataset. + /// + public static Tensor dataset_to_graph (Tensor input_dataset, string name = "DatasetToGraph") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + var op = _op_def_lib._apply_op_helper("DatasetToGraph", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs the single element from the given dataset. + /// + /// + /// A handle to a dataset that contains a single element. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DatasetToSingleElement'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The components of the single element of input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] dataset_to_single_element (Tensor dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "DatasetToSingleElement") + { + var dict = new Dictionary(); + dict["dataset"] = dataset; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("DatasetToSingleElement", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Writes the given dataset to the given file using the TFRecord format. + /// + /// + /// A variant tensor representing the dataset to write. + /// + /// + /// A scalar string tensor representing the filename to use. + /// + /// + /// A scalar string tensor containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DatasetToTFRecord'. 
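+ /// + /// A hedged usage sketch (the three input tensors are hypothetical placeholders assumed to already exist in the graph): + /// + /// // write the dataset to a TFRecord file; compression is "" (none), "ZLIB" or "GZIP" + /// Operation writeOp = gen_ops.dataset_to_t_f_record(datasetHandle, filenameTensor, compressionTypeTensor);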
+ /// + /// + /// Returns the description of the operation + /// + public static Operation dataset_to_t_f_record (Tensor input_dataset, Tensor filename, Tensor compression_type, string name = "DatasetToTFRecord") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["filename"] = filename; + dict["compression_type"] = compression_type; + var op = _op_def_lib._apply_op_helper("DatasetToTFRecord", name: name, keywords: dict); + return op; + } + + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DebugGradientIdentity'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on non-reference-type tensors. + /// + public static Tensor debug_gradient_identity (Tensor input, string name = "DebugGradientIdentity") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("DebugGradientIdentity", name: name, keywords: dict); + return op.output; + } + + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DebugGradientRefIdentity'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on reference-type tensors. + /// + public static Tensor debug_gradient_ref_identity (Tensor input, string name = "DebugGradientRefIdentity") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("DebugGradientRefIdentity", name: name, keywords: dict); + return op.output; + } + + /// + /// Debug Identity Op. + /// + /// + /// Input tensor, non-Reference type. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DebugIdentity'. + /// + /// + /// + /// + /// Name of the input tensor. + /// + /// + /// List of URLs to debug targets, e.g., + /// file:///foo/tfdbg_dump, grpc://localhost:11011 + /// + /// + /// Whether this op will be gated. If any of the debug_urls of this + /// debug node is of the grpc:// scheme, when the value of this attribute is set + /// to True, the data will not actually be sent via the grpc stream unless this + /// debug op has been enabled at the debug_url. If all of the debug_urls of this + /// debug node are of the grpc:// scheme and the debug op is enabled at none of + /// them, the output will be an empty Tensor. + /// + /// + /// Output tensor that equals the input tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Provides an identity mapping of the non-Ref type input tensor for debugging. + /// + public static Tensor debug_identity (Tensor input, string device_name = null, string tensor_name = null, string[] debug_urls = null, bool?
gated_grpc = null, string name = "DebugIdentity") + { + var dict = new Dictionary(); + dict["input"] = input; + if (device_name != null) + dict["device_name"] = device_name; + if (tensor_name != null) + dict["tensor_name"] = tensor_name; + if (debug_urls != null) + dict["debug_urls"] = debug_urls; + if (gated_grpc.HasValue) + dict["gated_grpc"] = gated_grpc.Value; + var op = _op_def_lib._apply_op_helper("DebugIdentity", name: name, keywords: dict); + return op.output; + } + + /// + /// Debug NaN Value Counter Op + /// + /// + /// Input tensor, non-Reference type. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DebugNanCount'. + /// + /// + /// + /// + /// Name of the input tensor. + /// + /// + /// List of URLs to debug targets, e.g., + /// file:///foo/tfdbg_dump, grpc://localhost:11011. + /// + /// + /// Whether this op will be gated. If any of the debug_urls of this + /// debug node is of the grpc:// scheme, when the value of this attribute is set + /// to True, the data will not actually be sent via the grpc stream unless this + /// debug op has been enabled at the debug_url. If all of the debug_urls of this + /// debug node are of the grpc:// scheme and the debug op is enabled at none of + /// them, the output will be an empty Tensor. + /// + /// + /// An integer output tensor that is the number of NaNs in the input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Counts number of NaNs in the input tensor, for debugging. + /// + public static Tensor debug_nan_count (Tensor input, string device_name = null, string tensor_name = null, string[] debug_urls = null, bool? gated_grpc = null, string name = "DebugNanCount") + { + var dict = new Dictionary(); + dict["input"] = input; + if (device_name != null) + dict["device_name"] = device_name; + if (tensor_name != null) + dict["tensor_name"] = tensor_name; + if (debug_urls != null) + dict["debug_urls"] = debug_urls; + if (gated_grpc.HasValue) + dict["gated_grpc"] = gated_grpc.Value; + var op = _op_def_lib._apply_op_helper("DebugNanCount", name: name, keywords: dict); + return op.output; + } + + /// + /// Debug Numeric Summary Op. + /// + /// + /// Input tensor, non-Reference type, float or double. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DebugNumericSummary'. + /// + /// + /// + /// + /// Name of the input tensor. + /// + /// + /// List of URLs to debug targets, e.g., + /// file:///foo/tfdbg_dump, grpc://localhost:11011 + /// + /// + /// (float) The lower bound <= which values will be included in the + /// generalized -inf count. Default: -inf. + /// + /// + /// (float) The upper bound >= which values will be included in the + /// generalized +inf count. Default: +inf. + /// + /// + /// (bool) Do not send data to the debug URLs unless at least one + /// of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and + /// inf counts) is non-zero. + /// + /// + /// Whether this op will be gated. If any of the debug_urls of this + /// debug node is of the grpc:// scheme, when the value of this attribute is set + /// to True, the data will not actually be sent via the grpc stream unless this + /// debug op has been enabled at the debug_url. If all of the debug_urls of this + /// debug node are of the grpc:// scheme and the debug op is enabled at none of + /// them, the output will be an empty Tensor.
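+ /// + /// A minimal sketch (the input tensor and dump directory are hypothetical placeholders): + /// + /// Tensor summary = gen_ops.debug_numeric_summary(someTensor, + /// tensor_name: "dense/kernel:0", + /// debug_urls: new[] { "file:///tmp/tfdbg_dump" });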
+ /// + /// + /// A double tensor of shape [14 + nDimensions], where nDimensions is + /// the number of dimensions of the tensor's shape. The elements of output are: + /// [0]: is initialized (1.0) or not (0.0). + /// [1]: total number of elements + /// [2]: NaN element count + /// [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by + /// default. + /// [4]: negative element count (excluding -inf), if lower_bound is the default + /// -inf. Otherwise, this is the count of elements > lower_bound and < 0. + /// [5]: zero element count + /// [6]: positive element count (excluding +inf), if upper_bound is the default + /// +inf. Otherwise, this is the count of elements < upper_bound and > 0. + /// [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by + /// default. + /// Output elements [1:8] are all zero, if the tensor is uninitialized. + /// [8]: minimum of all non-inf and non-NaN elements. + /// If uninitialized or no such element exists: +inf. + /// [9]: maximum of all non-inf and non-NaN elements. + /// If uninitialized or no such element exists: -inf. + /// [10]: mean of all non-inf and non-NaN elements. + /// If uninitialized or no such element exists: NaN. + /// [11]: variance of all non-inf and non-NaN elements. + /// If uninitialized or no such element exists: NaN. + /// [12]: Data type of the tensor encoded as an enum integer. See the DataType + /// proto for more details. + /// [13]: Number of dimensions of the tensor (ndims). + /// [14+]: Sizes of the dimensions. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Provide a basic summary of numeric value types, range and distribution. + /// + public static Tensor debug_numeric_summary (Tensor input, string device_name = null, string tensor_name = null, string[] debug_urls = null, float? lower_bound = null, float? upper_bound = null, bool? mute_if_healthy = null, bool? gated_grpc = null, string name = "DebugNumericSummary") + { + var dict = new Dictionary(); + dict["input"] = input; + if (device_name != null) + dict["device_name"] = device_name; + if (tensor_name != null) + dict["tensor_name"] = tensor_name; + if (debug_urls != null) + dict["debug_urls"] = debug_urls; + if (lower_bound.HasValue) + dict["lower_bound"] = lower_bound.Value; + if (upper_bound.HasValue) + dict["upper_bound"] = upper_bound.Value; + if (mute_if_healthy.HasValue) + dict["mute_if_healthy"] = mute_if_healthy.Value; + if (gated_grpc.HasValue) + dict["gated_grpc"] = gated_grpc.Value; + var op = _op_def_lib._apply_op_helper("DebugNumericSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode and Crop a JPEG-encoded image to a uint8 tensor. + /// + /// + /// 0-D. The JPEG-encoded image. + /// + /// + /// 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeAndCropJpeg'. + /// + /// + /// Number of color channels for the decoded image. + /// + /// + /// Downscaling ratio. + /// + /// + /// If true use a slower but nicer upscaling of the + /// chroma planes (yuv420/422 only). + /// + /// + /// If true try to recover an image from truncated input. + /// + /// + /// The minimum required fraction of lines before a truncated + /// input is accepted. + /// + /// + /// string specifying a hint about the algorithm used for + /// decompression.
Defaults to "" which maps to a system-specific + /// default. Currently valid values are ["INTEGER_FAST", + /// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + /// jpeg library changes to a version that does not have that specific + /// option.) + /// + /// + /// 3-D with shape [height, width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The attr channels indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the JPEG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// + /// If needed, the JPEG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// The attr ratio allows downscaling the image by an integer factor during + /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + /// downscaling the image later. + /// + /// + /// It is equivalent to a combination of decode and crop, but much faster because it + /// only decodes the part of the JPEG image within the crop window. + /// + public static Tensor decode_and_crop_jpeg (Tensor contents, Tensor crop_window, int? channels = null, int? ratio = null, bool? fancy_upscaling = null, bool? try_recover_truncated = null, float? acceptable_fraction = null, string dct_method = null, string name = "DecodeAndCropJpeg") + { + var dict = new Dictionary(); + dict["contents"] = contents; + dict["crop_window"] = crop_window; + if (channels.HasValue) + dict["channels"] = channels.Value; + if (ratio.HasValue) + dict["ratio"] = ratio.Value; + if (fancy_upscaling.HasValue) + dict["fancy_upscaling"] = fancy_upscaling.Value; + if (try_recover_truncated.HasValue) + dict["try_recover_truncated"] = try_recover_truncated.Value; + if (acceptable_fraction.HasValue) + dict["acceptable_fraction"] = acceptable_fraction.Value; + if (dct_method != null) + dict["dct_method"] = dct_method; + var op = _op_def_lib._apply_op_helper("DecodeAndCropJpeg", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode web-safe base64-encoded strings. + /// + /// + /// Base64 strings to decode. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeBase64'. + /// + /// + /// Decoded strings. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Input may or may not have padding at the end. See EncodeBase64 for padding. + /// Web-safe means that input must use - and _ instead of + and /. + /// + public static Tensor decode_base64 (Tensor input, string name = "DecodeBase64") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("DecodeBase64", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode the first frame of a BMP-encoded image to a uint8 tensor. + /// + /// + /// 0-D. The BMP-encoded image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeBmp'. + /// + /// + /// + /// + /// 3-D with shape [height, width, channels]. RGB order + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The attr channels indicates the desired number of color channels for the + /// decoded image.
+ /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the BMP-encoded image. + /// * 3: output an RGB image. + /// * 4: output an RGBA image. + /// + public static Tensor decode_bmp (Tensor contents, int? channels = null, string name = "DecodeBmp") + { + var dict = new Dictionary(); + dict["contents"] = contents; + if (channels.HasValue) + dict["channels"] = channels.Value; + var op = _op_def_lib._apply_op_helper("DecodeBmp", name: name, keywords: dict); + return op.output; + } + + /// + /// Convert CSV records to tensors. Each column maps to one tensor. + /// + /// + /// Each string is a record/row in the csv and all records should have + /// the same format. + /// + /// + /// One tensor per column of the input record, with either a + /// scalar default value for that column or an empty vector if the column is + /// required. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeCSV'. + /// + /// + /// char delimiter to separate fields in a record. + /// + /// + /// If false, treats double quotation marks as regular + /// characters inside of the string fields (ignoring RFC 4180, Section 2, + /// Bullet 5). + /// + /// + /// Additional string to recognize as NA/NaN. + /// + /// + /// + /// + /// Each tensor will have the same shape as records. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// RFC 4180 format is expected for the CSV records. + /// (https://tools.ietf.org/html/rfc4180) + /// Note that we allow leading and trailing spaces in int or float fields. + /// + public static Tensor[] decode_c_s_v (Tensor records, Tensor[] record_defaults, string field_delim = null, bool? use_quote_delim = null, string na_value = null, int[] select_cols = null, string name = "DecodeCSV") + { + var dict = new Dictionary(); + dict["records"] = records; + dict["record_defaults"] = record_defaults; + if (field_delim != null) + dict["field_delim"] = field_delim; + if (use_quote_delim.HasValue) + dict["use_quote_delim"] = use_quote_delim.Value; + if (na_value != null) + dict["na_value"] = na_value; + if (select_cols != null) + dict["select_cols"] = select_cols; + var op = _op_def_lib._apply_op_helper("DecodeCSV", name: name, keywords: dict); + int _idx = 0; + var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray(); + return (output); + } + + /// + /// Decompress strings. + /// + /// + /// A Tensor of string which is compressed. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeCompressed'. + /// + /// + /// A scalar containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + /// + /// + /// A Tensor with the same shape as input bytes, uncompressed + /// from bytes. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op decompresses each element of the bytes input Tensor, which + /// is assumed to be compressed using the given compression_type. + /// + /// The output is a string Tensor of the same shape as bytes, + /// each element containing the decompressed data from the corresponding + /// element in bytes.
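+ /// + /// A minimal sketch (the compressed string tensor is a hypothetical placeholder): + /// + /// Tensor raw = gen_ops.decode_compressed(compressedBytes, compression_type: "GZIP");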
+ /// + public static Tensor decode_compressed (Tensor bytes, string compression_type = null, string name = "DecodeCompressed") + { + var dict = new Dictionary(); + dict["bytes"] = bytes; + if (compression_type != null) + dict["compression_type"] = compression_type; + var op = _op_def_lib._apply_op_helper("DecodeCompressed", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode the first frame of a GIF-encoded image to a uint8 tensor. + /// + /// + /// 0-D. The GIF-encoded image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeGif'. + /// + /// + /// 4-D with shape [num_frames, height, width, 3]. RGB order + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// GIFs with frame or transparency compression are not supported; + /// convert an animated GIF from compressed to uncompressed with: + /// + /// convert $src.gif -coalesce $dst.gif + /// + /// This op also supports decoding JPEGs and PNGs, though it is cleaner to use + /// tf.image.decode_image. + /// + public static Tensor decode_gif (Tensor contents, string name = "DecodeGif") + { + var dict = new Dictionary(); + dict["contents"] = contents; + var op = _op_def_lib._apply_op_helper("DecodeGif", name: name, keywords: dict); + return op.output; + } + + /// + /// Convert JSON-encoded Example records to binary protocol buffer strings. + /// + /// + /// Each string is a JSON object serialized according to the JSON + /// mapping of the Example proto. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeJSONExample'. + /// + /// + /// Each string is a binary Example protocol buffer corresponding + /// to the respective element of json_examples. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op translates a tensor containing Example records, encoded using + /// the [standard JSON + /// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), + /// into a tensor containing the same records encoded as binary protocol + /// buffers. The resulting tensor can then be fed to any of the other + /// Example-parsing ops. + /// + public static Tensor decode_j_s_o_n_example (Tensor json_examples, string name = "DecodeJSONExample") + { + var dict = new Dictionary(); + dict["json_examples"] = json_examples; + var op = _op_def_lib._apply_op_helper("DecodeJSONExample", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode a JPEG-encoded image to a uint8 tensor. + /// + /// + /// 0-D. The JPEG-encoded image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeJpeg'. + /// + /// + /// Number of color channels for the decoded image. + /// + /// + /// Downscaling ratio. + /// + /// + /// If true use a slower but nicer upscaling of the + /// chroma planes (yuv420/422 only). + /// + /// + /// If true try to recover an image from truncated input. + /// + /// + /// The minimum required fraction of lines before a truncated + /// input is accepted. + /// + /// + /// string specifying a hint about the algorithm used for + /// decompression. Defaults to "" which maps to a system-specific + /// default. Currently valid values are ["INTEGER_FAST", + /// "INTEGER_ACCURATE"].
The hint may be ignored (e.g., the internal + /// jpeg library changes to a version that does not have that specific + /// option.) + /// + /// + /// 3-D with shape [height, width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The attr channels indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the JPEG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// + /// If needed, the JPEG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// The attr ratio allows downscaling the image by an integer factor during + /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + /// downscaling the image later. + /// + /// + /// This op also supports decoding PNGs and non-animated GIFs since the interface is + /// the same, though it is cleaner to use tf.image.decode_image. + /// + public static Tensor decode_jpeg (Tensor contents, int? channels = null, int? ratio = null, bool? fancy_upscaling = null, bool? try_recover_truncated = null, float? acceptable_fraction = null, string dct_method = null, string name = "DecodeJpeg") + { + var dict = new Dictionary(); + dict["contents"] = contents; + if (channels.HasValue) + dict["channels"] = channels.Value; + if (ratio.HasValue) + dict["ratio"] = ratio.Value; + if (fancy_upscaling.HasValue) + dict["fancy_upscaling"] = fancy_upscaling.Value; + if (try_recover_truncated.HasValue) + dict["try_recover_truncated"] = try_recover_truncated.Value; + if (acceptable_fraction.HasValue) + dict["acceptable_fraction"] = acceptable_fraction.Value; + if (dct_method != null) + dict["dct_method"] = dct_method; + var op = _op_def_lib._apply_op_helper("DecodeJpeg", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode a PNG-encoded image to a uint8 or uint16 tensor. + /// + /// + /// 0-D. The PNG-encoded image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodePng'. + /// + /// + /// Number of color channels for the decoded image. + /// + /// + /// + /// + /// 3-D with shape [height, width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The attr channels indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the PNG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// * 4: output an RGBA image. + /// + /// If needed, the PNG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// This op also supports decoding JPEGs and non-animated GIFs since the interface + /// is the same, though it is cleaner to use tf.image.decode_image. + /// + public static Tensor decode_png (Tensor contents, int? channels = null, TF_DataType? dtype = null, string name = "DecodePng") + { + var dict = new Dictionary(); + dict["contents"] = contents; + if (channels.HasValue) + dict["channels"] = channels.Value; + if (dtype.HasValue) + dict["dtype"] = dtype.Value; + var op = _op_def_lib._apply_op_helper("DecodePng", name: name, keywords: dict); + return op.output; + } + + /// + /// The op extracts fields from a serialized protocol buffers message into tensors.
+ /// + /// + /// Tensor of serialized protos with shape batch_shape. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeProtoV2'. + /// + /// + /// Optional argument + /// Name of the proto message type to decode. + /// + /// + /// Optional argument + /// List of strings containing proto field names. + /// + /// + /// Optional argument + /// List of TF types to use for the respective field in field_names. + /// + /// + /// Either the special value local:// or a path to a file containing + /// a serialized FileDescriptorSet. + /// + /// + /// Either binary or text. + /// + /// + /// Whether to sanitize the result or not. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sizes : Tensor of int32 with shape [batch_shape, len(field_names)]. + /// Each entry is the number of values found for the corresponding field. + /// Optional fields may have 0 or 1 values. + /// values : List of tensors containing values for the corresponding field. + /// values[i] has datatype output_types[i] + /// and shape [batch_shape, max(sizes[...,i])]. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// The decode_proto op extracts fields from a serialized protocol buffers + /// message into tensors. The fields in field_names are decoded and converted + /// to the corresponding output_types if possible. + /// + /// A message_type name must be provided to give context for the field + /// names. The actual message descriptor can be looked up either in the + /// linked-in descriptor pool or a filename provided by the caller using + /// the descriptor_source attribute. + /// + /// Each output tensor is a dense tensor. This means that it is padded to + /// hold the largest number of repeated elements seen in the input + /// minibatch. (The shape is also padded by one to prevent zero-sized + /// dimensions). The actual repeat counts for each example in the + /// minibatch can be found in the sizes output. In many cases the output + /// of decode_proto is fed immediately into tf.squeeze if missing values + /// are not a concern. When using tf.squeeze, always pass the squeeze + /// dimension explicitly to avoid surprises. + /// + /// For the most part, the mapping between Proto field types and + /// TensorFlow dtypes is straightforward. However, there are a few + /// special cases: + /// + /// - A proto field that contains a submessage or group can only be converted + /// to DT_STRING (the serialized submessage). This is to reduce the + /// complexity of the API. The resulting string can be used as input + /// to another instance of the decode_proto op. + /// + /// - TensorFlow lacks support for unsigned integers. The ops represent uint64 + /// types as a DT_INT64 with the same twos-complement bit pattern + /// (the obvious way). Unsigned int32 values can be represented exactly by + /// specifying type DT_INT64, or using twos-complement if the caller + /// specifies DT_INT32 in the output_types attribute. + /// + /// The descriptor_source attribute selects a source of protocol + /// descriptors to consult when looking up message_type. This may be a + /// filename containing a serialized FileDescriptorSet message, + /// or the special value local://, in which case only descriptors linked + /// into the code will be searched; the filename can be on any filesystem + /// accessible to TensorFlow.
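+ /// + /// A hedged usage sketch (the message type, field names and input tensor below are illustrative assumptions, not values required by the op): + /// + /// var (sizes, values) = gen_ops.decode_proto_v2(serializedBytes, + /// message_type: "mypackage.MyMessage", + /// field_names: new[] { "id", "score" }, + /// output_types: new[] { TF_DataType.TF_INT64, TF_DataType.TF_FLOAT });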
+ /// + /// You can build a descriptor_source file using the --descriptor_set_out + /// and --include_imports options to the protocol compiler protoc. + /// + /// The local:// database only covers descriptors linked into the + /// code via C++ libraries, not Python imports. You can link in a proto descriptor + /// by creating a cc_library target with alwayslink=1. + /// + /// Both binary and text proto serializations are supported, and can be + /// chosen using the format attribute. + /// + public static (Tensor sizes, Tensor[] values) decode_proto_v2 (Tensor bytes, string message_type, string[] field_names, TF_DataType[] output_types, string descriptor_source = null, string message_format = null, bool? sanitize = null, string name = "DecodeProtoV2") + { + var dict = new Dictionary(); + dict["bytes"] = bytes; + dict["message_type"] = message_type; + dict["field_names"] = field_names; + dict["output_types"] = output_types; + if (descriptor_source != null) + dict["descriptor_source"] = descriptor_source; + if (message_format != null) + dict["message_format"] = message_format; + if (sanitize.HasValue) + dict["sanitize"] = sanitize.Value; + var op = _op_def_lib._apply_op_helper("DecodeProtoV2", name: name, keywords: dict); + int _idx = 0; + var sizes = op.outputs[_idx++]; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (sizes, values); + } + + /// + /// Reinterpret the bytes of a string as a vector of numbers. + /// + /// + /// All the elements must have the same length. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeRaw'. + /// + /// + /// Optional argument + /// + /// + /// Whether the input bytes are in little-endian order. + /// Ignored for out_type values that are stored in a single byte like + /// uint8. + /// + /// + /// A Tensor with one more dimension than the input bytes. The + /// added dimension will have size equal to the length of the elements + /// of bytes divided by the number of bytes to represent out_type. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor decode_raw (Tensor bytes, TF_DataType out_type, bool? little_endian = null, string name = "DecodeRaw") + { + var dict = new Dictionary(); + dict["bytes"] = bytes; + dict["out_type"] = out_type; + if (little_endian.HasValue) + dict["little_endian"] = little_endian.Value; + var op = _op_def_lib._apply_op_helper("DecodeRaw", name: name, keywords: dict); + return op.output; + } + + /// + /// Decode a 16-bit PCM WAV file to a float tensor. + /// + /// + /// The WAV-encoded audio, usually from a file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DecodeWav'. + /// + /// + /// Number of sample channels wanted. + /// + /// + /// Length of audio requested. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// audio : 2-D with shape [length, channels]. + /// sample_rate : Scalar holding the sample rate found in the WAV header. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
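+ /// + /// A minimal sketch (the WAV-bytes tensor is a hypothetical placeholder): + /// + /// var (audio, sampleRate) = gen_ops.decode_wav(wavBytes, desired_channels: 1, desired_samples: 16000);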
+ /// + /// When desired_channels is set, if the input contains fewer channels than this + /// then the last channel will be duplicated to give the requested number, else if + /// the input has more channels than requested then the additional channels will be + /// ignored. + /// + /// If desired_samples is set, then the audio will be cropped or padded with zeroes + /// to the requested length. + /// + /// The first output contains a Tensor with the content of the audio samples. The + /// lowest dimension will be the number of channels, and the second will be the + /// number of samples. For example, a ten-sample-long stereo WAV file should give an + /// output shape of [10, 2]. + /// + public static (Tensor audio, Tensor sample_rate) decode_wav (Tensor contents, int? desired_channels = null, int? desired_samples = null, string name = "DecodeWav") + { + var dict = new Dictionary(); + dict["contents"] = contents; + if (desired_channels.HasValue) + dict["desired_channels"] = desired_channels.Value; + if (desired_samples.HasValue) + dict["desired_samples"] = desired_samples.Value; + var op = _op_def_lib._apply_op_helper("DecodeWav", name: name, keywords: dict); + int _idx = 0; + var audio = op.outputs[_idx++]; + var sample_rate = op.outputs[_idx++]; + return (audio, sample_rate); + } + + /// + /// Makes a copy of x. + /// + /// + /// The source tensor of type T. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DeepCopy'. + /// + /// + /// y: A Tensor of type T. A copy of x. Guaranteed that y + /// is not an alias of x. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor deep_copy (Tensor x, string name = "DeepCopy") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("DeepCopy", name: name, keywords: dict); + return op.output; + } + + /// + /// Delete the tensor specified by its handle in the session. + /// + /// + /// The handle for a tensor stored in the session state. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DeleteSessionTensor'. + /// + /// + /// Returns the description of the operation + /// + public static Operation delete_session_tensor (Tensor handle, string name = "DeleteSessionTensor") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("DeleteSessionTensor", name: name, keywords: dict); + return op; + } + + /// + /// Applies set operation along last dimension of 2 Tensor inputs. + /// + /// + /// Tensor with rank n. 1st n-1 dimensions must be the same as set2. + /// Dimension n contains values in a set, duplicates are allowed but ignored. + /// + /// + /// Tensor with rank n. 1st n-1 dimensions must be the same as set1. + /// Dimension n contains values in a set, duplicates are allowed but ignored. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DenseToDenseSetOperation'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// result_indices : 2D indices of a SparseTensor. + /// result_values : 1D values of a SparseTensor. + /// result_shape : 1D Tensor shape of a SparseTensor. result_shape[0...n-1] is + /// the same as the 1st n-1 dimensions of set1 and set2, result_shape[n] + /// is the max result set size across all 0...n-1 dimensions. 
+ /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// See SetOperationOp::SetOperationFromContext for values of set_operation. + /// + /// Output result is a SparseTensor represented by result_indices, + /// result_values, and result_shape. For set1 and set2 ranked n, this + /// has rank n and the same 1st n-1 dimensions as set1 and set2. The nth + /// dimension contains the result of set_operation applied to the corresponding + /// [0...n-1] dimension of set. + /// + public static (Tensor result_indices, Tensor result_values, Tensor result_shape) dense_to_dense_set_operation (Tensor set1, Tensor set2, string set_operation, bool? validate_indices = null, string name = "DenseToDenseSetOperation") + { + var dict = new Dictionary(); + dict["set1"] = set1; + dict["set2"] = set2; + dict["set_operation"] = set_operation; + if (validate_indices.HasValue) + dict["validate_indices"] = validate_indices.Value; + var op = _op_def_lib._apply_op_helper("DenseToDenseSetOperation", name: name, keywords: dict); + int _idx = 0; + var result_indices = op.outputs[_idx++]; + var result_values = op.outputs[_idx++]; + var result_shape = op.outputs[_idx++]; + return (result_indices, result_values, result_shape); + } + + /// + /// Creates a dataset that batches input elements into a SparseTensor. + /// + /// + /// A handle to an input dataset. Must have a single component. + /// + /// + /// A scalar representing the number of elements to accumulate in a + /// batch. + /// + /// + /// A vector representing the dense shape of each row in the produced + /// SparseTensor. The shape may be partially specified, using -1 to indicate + /// that a particular dimension should use the maximum size of all batch elements. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DenseToSparseBatchDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor dense_to_sparse_batch_dataset (Tensor input_dataset, Tensor batch_size, Tensor row_shape, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "DenseToSparseBatchDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["batch_size"] = batch_size; + dict["row_shape"] = row_shape; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("DenseToSparseBatchDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Applies set operation along last dimension of Tensor and SparseTensor. + /// + /// + /// Tensor with rank n. 1st n-1 dimensions must be the same as set2. + /// Dimension n contains values in a set, duplicates are allowed but ignored. + /// + /// + /// 2D Tensor, indices of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, values of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, shape of a SparseTensor. set2_shape[0...n-1] must + /// be the same as the 1st n-1 dimensions of set1, result_shape[n] is the + /// max set size across n-1 dimensions. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DenseToSparseSetOperation'. 
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// result_indices : 2D indices of a SparseTensor.
+ /// result_values : 1D values of a SparseTensor.
+ /// result_shape : 1D Tensor shape of a SparseTensor. result_shape[0...n-1] is
+ /// the same as the 1st n-1 dimensions of set1 and set2, result_shape[n]
+ /// is the max result set size across all 0...n-1 dimensions.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// See SetOperationOp::SetOperationFromContext for values of set_operation.
+ ///
+ /// Input set2 is a SparseTensor represented by set2_indices, set2_values,
+ /// and set2_shape. For set2 ranked n, 1st n-1 dimensions must be the same
+ /// as set1. Dimension n contains values in a set, duplicates are allowed but
+ /// ignored.
+ ///
+ /// If validate_indices is True, this op validates the order and range of set2
+ /// indices.
+ ///
+ /// Output result is a SparseTensor represented by result_indices,
+ /// result_values, and result_shape. For set1 and set2 ranked n, this
+ /// has rank n and the same 1st n-1 dimensions as set1 and set2. The nth
+ /// dimension contains the result of set_operation applied to the corresponding
+ /// [0...n-1] dimension of set.
+ ///
+ public static (Tensor result_indices, Tensor result_values, Tensor result_shape) dense_to_sparse_set_operation (Tensor set1, Tensor set2_indices, Tensor set2_values, Tensor set2_shape, string set_operation, bool? validate_indices = null, string name = "DenseToSparseSetOperation")
+ {
+ var dict = new Dictionary();
+ dict["set1"] = set1;
+ dict["set2_indices"] = set2_indices;
+ dict["set2_values"] = set2_values;
+ dict["set2_shape"] = set2_shape;
+ dict["set_operation"] = set_operation;
+ if (validate_indices.HasValue)
+ dict["validate_indices"] = validate_indices.Value;
+ var op = _op_def_lib._apply_op_helper("DenseToSparseSetOperation", name: name, keywords: dict);
+ int _idx = 0;
+ var result_indices = op.outputs[_idx++];
+ var result_values = op.outputs[_idx++];
+ var result_shape = op.outputs[_idx++];
+ return (result_indices, result_values, result_shape);
+ }
+
+ ///
+ /// DepthToSpace for tensors of type T.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DepthToSpace'.
+ ///
+ ///
+ /// Optional argument
+ /// The size of the spatial block, same as in Space2Depth.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Rearranges data from depth into blocks of spatial data.
+ /// This is the reverse transformation of SpaceToDepth. More specifically,
+ /// this op outputs a copy of the input tensor where values from the depth
+ /// dimension are moved in spatial blocks to the height and width dimensions.
+ /// The attr block_size indicates the input block size and how the data is moved.
+ ///
+ /// * Chunks of data of size block_size * block_size from depth are rearranged
+ /// into non-overlapping blocks of size block_size x block_size
+ /// * The width of the output tensor is input_width * block_size, whereas the
+ /// height is input_height * block_size.
+ /// * The Y, X coordinates within each block of the output image are determined
+ /// by the high order component of the input channel index.
+ /// * The depth of the input tensor must be divisible by + /// block_size * block_size. + /// + /// The data_format attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": [ batch, height, width, channels ] + /// "NCHW": [ batch, channels, height, width ] + /// "NCHW_VECT_C": + /// qint8 [ batch, channels / 4, height, width, 4 ] + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + /// within the input image, bX, bY means coordinates + /// within the output block, oC means output channels). + /// The output would be the input transposed to the following layout: + /// n,iY,bY,iX,bX,oC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. + /// + /// For example, given an input of shape [1, 1, 1, 4], data_format = "NHWC" and + /// block_size = 2: + /// + /// + /// x = [[[[1, 2, 3, 4]]]] + /// + /// + /// + /// This operation will output a tensor of shape [1, 2, 2, 1]: + /// + /// + /// [[[[1], [2]], + /// [[3], [4]]]] + /// + /// + /// Here, the input has a batch of 1 and each batch element has shape [1, 1, 4], + /// the corresponding output will have 2x2 elements and will have a depth of + /// 1 channel (1 = 4 / (block_size * block_size)). + /// The output element shape is [2, 2, 1]. + /// + /// For an input tensor with larger depth, here of shape [1, 1, 1, 12], e.g. + /// + /// + /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// + /// + /// This operation, for block size of 2, will return the following tensor of shape + /// [1, 2, 2, 3] + /// + /// + /// [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// + /// Similarly, for the following input of shape [1 2 2 4], and a block size of 2: + /// + /// + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// + /// + /// the operator will return the following tensor of shape [1 4 4 1]: + /// + /// + /// x = [[[ [1], [2], [5], [6]], + /// [ [3], [4], [7], [8]], + /// [ [9], [10], [13], [14]], + /// [ [11], [12], [15], [16]]]] + /// + /// + /// + public static Tensor depth_to_space (Tensor input, int block_size, string data_format = null, string name = "DepthToSpace") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["block_size"] = block_size; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("DepthToSpace", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes a 2-D depthwise convolution given 4-D input and filter tensors. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DepthwiseConv2dNative'. + /// + /// + /// Optional argument + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of input. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. 
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, channels, height, width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// data_format, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given an input tensor of shape [batch, in_height, in_width, in_channels]
+ /// and a filter / kernel tensor of shape
+ /// [filter_height, filter_width, in_channels, channel_multiplier], containing
+ /// in_channels convolutional filters of depth 1, depthwise_conv2d applies
+ /// a different filter to each input channel (expanding from 1 channel to
+ /// channel_multiplier channels for each), then concatenates the results
+ /// together. Thus, the output has in_channels * channel_multiplier channels.
+ ///
+ ///
+ /// for k in 0..in_channels-1
+ /// for q in 0..channel_multiplier-1
+ /// output[b, i, j, k * channel_multiplier + q] =
+ /// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ /// filter[di, dj, k, q]
+ ///
+ ///
+ /// Must have strides[0] = strides[3] = 1. For the most common case of the same
+ /// horizontal and vertical strides, strides = [1, stride, stride, 1].
+ ///
+ public static Tensor depthwise_conv2d_native (Tensor input, Tensor filter, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "DepthwiseConv2dNative")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNative", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of depthwise convolution with respect to the filter.
+ ///
+ ///
+ /// 4-D with shape based on data_format. For example, if
+ /// data_format is 'NHWC' then input is a 4-D [batch, in_height,
+ /// in_width, in_channels] tensor.
+ ///
+ ///
+ /// An integer vector representing the tensor shape of filter,
+ /// where filter is a 4-D
+ /// [filter_height, filter_width, in_channels, depthwise_multiplier] tensor.
+ ///
+ ///
+ /// 4-D with shape based on data_format.
+ /// For example, if data_format is 'NHWC' then
+ /// out_backprop shape is [batch, out_height, out_width, out_channels].
+ /// Gradients w.r.t. the output of the convolution.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DepthwiseConv2dNativeBackpropFilter'.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the input
+ /// of the convolution.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, height, width, channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, channels, height, width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// data_format, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
+ ///
+ ///
+ /// 4-D with shape
+ /// [filter_height, filter_width, in_channels, out_channels]. Gradient w.r.t.
+ /// the filter input of the convolution.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor depthwise_conv2d_native_backprop_filter (Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "DepthwiseConv2dNativeBackpropFilter")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["filter_sizes"] = filter_sizes;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the gradients of depthwise convolution with respect to the input.
+ ///
+ ///
+ /// An integer vector representing the shape of input, based
+ /// on data_format. For example, if data_format is 'NHWC' then
+ /// input is a 4-D [batch, height, width, channels] tensor.
+ ///
+ ///
+ /// 4-D with shape
+ /// [filter_height, filter_width, in_channels, depthwise_multiplier].
+ ///
+ ///
+ /// 4-D with shape based on data_format.
+ /// For example, if data_format is 'NHWC' then
+ /// out_backprop shape is [batch, out_height, out_width, out_channels].
+ /// Gradients w.r.t. the output of the convolution.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DepthwiseConv2dNativeBackpropInput'.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the input
+ /// of the convolution.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, height, width, channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, channels, height, width].
+ ///
+ ///
+ /// 1-D tensor of length 4. The dilation factor for each dimension of
+ /// input. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// data_format, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
+ ///
+ ///
+ /// 4-D with shape according to data_format. For example, if
+ /// data_format is 'NHWC', output shape is [batch, in_height,
+ /// in_width, in_channels]. Gradient w.r.t. the input of the
+ /// convolution.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
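+ /// As an illustrative shape check (the concrete values are assumptions): with
+ /// input_sizes = [1, 5, 5, 2], a filter of shape [3, 3, 2, 1],
+ /// strides = [1, 1, 1, 1] and padding = "SAME", the returned gradient tensor
+ /// has shape [1, 5, 5, 2], i.e. exactly the shape described by input_sizes.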
+ ///
+ public static Tensor depthwise_conv2d_native_backprop_input (Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format = null, int[] dilations = null, string name = "DepthwiseConv2dNativeBackpropInput")
+ {
+ var dict = new Dictionary();
+ dict["input_sizes"] = input_sizes;
+ dict["filter"] = filter;
+ dict["out_backprop"] = out_backprop;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Dequantize the 'input' tensor into a float Tensor.
+ ///
+ ///
+ ///
+ ///
+ /// The minimum scalar value possibly produced for the input.
+ ///
+ ///
+ /// The maximum scalar value possibly produced for the input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Dequantize'.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// [min_range, max_range] are scalar floats that specify the range for
+ /// the 'input' data. The 'mode' attribute controls exactly which calculations are
+ /// used to convert the float values to their quantized equivalents.
+ ///
+ /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+ ///
+ ///
+ /// if T == qint8, in[i] += (range(T) + 1) / 2.0
+ /// out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
+ ///
+ /// here range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()
+ ///
+ /// *MIN_COMBINED Mode Example*
+ ///
+ /// If the input comes from a QuantizedRelu6, the output type is
+ /// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
+ /// 0-6. The min_range and max_range values are therefore 0.0 and 6.0.
+ /// Dequantize on quint8 will take each value, cast to float, and multiply
+ /// by 6 / 255.
+ /// Note that if the quantized type is qint8, the operation will additionally add
+ /// 128 to each value prior to casting.
+ ///
+ /// If the mode is 'MIN_FIRST', then this approach is used:
+ ///
+ ///
+ /// num_discrete_values = 1 << (# of bits in T)
+ /// range_adjust = num_discrete_values / (num_discrete_values - 1)
+ /// range = (range_max - range_min) * range_adjust
+ /// range_scale = range / num_discrete_values
+ /// const double offset_input = static_cast<double>(input) - lowest_quantized;
+ /// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
+ ///
+ ///
+ /// *SCALED mode Example*
+ ///
+ /// SCALED mode matches the quantization approach used in
+ /// QuantizeAndDequantize{V2|V3}.
+ ///
+ /// If the mode is SCALED, we do not use the full range of the output type,
+ /// choosing to elide the lowest possible value for symmetry (e.g., output range is
+ /// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+ /// 0.
+ ///
+ /// We first find the range of values in our tensor. The
+ /// range we use is always centered on 0, so we find m such that
+ ///
+ /// m = max(abs(input_min), abs(input_max))
+ ///
+ ///
+ /// Our input tensor range is then [-m, m].
+ ///
+ /// Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
+ /// If T is signed, this is + /// + /// num_bits = sizeof(T) * 8 + /// [min_fixed, max_fixed] = + /// [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1] + /// + /// + /// Otherwise, if T is unsigned, the fixed-point range is + /// + /// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1] + /// + /// + /// From this we compute our scaling factor, s: + /// + /// s = (2 * m) / (max_fixed - min_fixed) + /// + /// + /// Now we can dequantize the elements of our tensor: + /// + /// result = input * s + /// + /// + public static Tensor dequantize (Tensor input, Tensor min_range, Tensor max_range, string mode = null, string name = "Dequantize") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["min_range"] = min_range; + dict["max_range"] = max_range; + if (mode != null) + dict["mode"] = mode; + var op = _op_def_lib._apply_op_helper("Dequantize", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts the given variant tensor to an iterator and stores it in the given resource. + /// + /// + /// A handle to an iterator resource. + /// + /// + /// A variant tensor storing the state of the iterator contained in the + /// resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DeserializeIterator'. + /// + /// + /// Returns the description of the operation + /// + public static Operation deserialize_iterator (Tensor resource_handle, Tensor serialized, string name = "DeserializeIterator") + { + var dict = new Dictionary(); + dict["resource_handle"] = resource_handle; + dict["serialized"] = serialized; + var op = _op_def_lib._apply_op_helper("DeserializeIterator", name: name, keywords: dict); + return op; + } + + /// + /// Deserialize and concatenate SparseTensors from a serialized minibatch. + /// + /// + /// 2-D, The N serialized SparseTensor objects. + /// Must have 3 columns. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DeserializeManySparse'. + /// + /// + /// Optional argument + /// The dtype of the serialized SparseTensor objects. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sparse_indices : + /// sparse_values : + /// sparse_shape : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// The input serialized_sparse must be a string matrix of shape [N x 3] where + /// N is the minibatch size and the rows correspond to packed outputs of + /// SerializeSparse. The ranks of the original SparseTensor objects + /// must all match. When the final SparseTensor is created, it has rank one + /// higher than the ranks of the incoming SparseTensor objects + /// (they have been concatenated along a new row dimension). + /// + /// The output SparseTensor object's shape values for all dimensions but the + /// first are the max across the input SparseTensor objects' shape values + /// for the corresponding dimensions. Its first shape value is N, the minibatch + /// size. + /// + /// The input SparseTensor objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run SparseReorder to restore index ordering. 
+ /// + /// For example, if the serialized input is a [2 x 3] matrix representing two + /// original SparseTensor objects: + /// + /// index = [ 0] + /// [10] + /// [20] + /// values = [1, 2, 3] + /// shape = [50] + /// + /// and + /// + /// index = [ 2] + /// [10] + /// values = [4, 5] + /// shape = [30] + /// + /// then the final deserialized SparseTensor will be: + /// + /// index = [0 0] + /// [0 10] + /// [0 20] + /// [1 2] + /// [1 10] + /// values = [1, 2, 3, 4, 5] + /// shape = [2 50] + /// + public static (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape) deserialize_many_sparse (Tensor serialized_sparse, TF_DataType dtype, string name = "DeserializeManySparse") + { + var dict = new Dictionary(); + dict["serialized_sparse"] = serialized_sparse; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("DeserializeManySparse", name: name, keywords: dict); + int _idx = 0; + var sparse_indices = op.outputs[_idx++]; + var sparse_values = op.outputs[_idx++]; + var sparse_shape = op.outputs[_idx++]; + return (sparse_indices, sparse_values, sparse_shape); + } + + /// + /// Deserialize SparseTensor objects. + /// + /// + /// The serialized SparseTensor objects. The last dimension + /// must have 3 columns. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DeserializeSparse'. + /// + /// + /// Optional argument + /// The dtype of the serialized SparseTensor objects. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sparse_indices : + /// sparse_values : + /// sparse_shape : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// The input serialized_sparse must have the shape [?, ?, ..., ?, 3] where + /// the last dimension stores serialized SparseTensor objects and the other N + /// dimensions (N >= 0) correspond to a batch. The ranks of the original + /// SparseTensor objects must all match. When the final SparseTensor is + /// created, its rank is the rank of the incoming SparseTensor objects plus N; + /// the sparse tensors have been concatenated along new dimensions, one for each + /// batch. + /// + /// The output SparseTensor object's shape values for the original dimensions + /// are the max across the input SparseTensor objects' shape values for the + /// corresponding dimensions. The new dimensions match the size of the batch. + /// + /// The input SparseTensor objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run SparseReorder to restore index ordering. 
+ ///
+ /// For example, if the serialized input is a [2 x 3] matrix representing two
+ /// original SparseTensor objects:
+ ///
+ /// index = [ 0]
+ /// [10]
+ /// [20]
+ /// values = [1, 2, 3]
+ /// shape = [50]
+ ///
+ /// and
+ ///
+ /// index = [ 2]
+ /// [10]
+ /// values = [4, 5]
+ /// shape = [30]
+ ///
+ /// then the final deserialized SparseTensor will be:
+ ///
+ /// index = [0 0]
+ /// [0 10]
+ /// [0 20]
+ /// [1 2]
+ /// [1 10]
+ /// values = [1, 2, 3, 4, 5]
+ /// shape = [2 50]
+ ///
+ public static (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape) deserialize_sparse (Tensor serialized_sparse, TF_DataType dtype, string name = "DeserializeSparse")
+ {
+ var dict = new Dictionary();
+ dict["serialized_sparse"] = serialized_sparse;
+ dict["dtype"] = dtype;
+ var op = _op_def_lib._apply_op_helper("DeserializeSparse", name: name, keywords: dict);
+ int _idx = 0;
+ var sparse_indices = op.outputs[_idx++];
+ var sparse_values = op.outputs[_idx++];
+ var sparse_shape = op.outputs[_idx++];
+ return (sparse_indices, sparse_values, sparse_shape);
+ }
+
+ ///
+ /// Deletes the resource specified by the handle.
+ ///
+ ///
+ /// handle to the resource to delete.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DestroyResourceOp'.
+ ///
+ ///
+ /// whether to ignore the error when the resource
+ /// doesn't exist.
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// All subsequent operations using the resource will result in a NotFound
+ /// error status.
+ ///
+ public static Operation destroy_resource_op (Tensor resource, bool? ignore_lookup_error = null, string name = "DestroyResourceOp")
+ {
+ var dict = new Dictionary();
+ dict["resource"] = resource;
+ if (ignore_lookup_error.HasValue)
+ dict["ignore_lookup_error"] = ignore_lookup_error.Value;
+ var op = _op_def_lib._apply_op_helper("DestroyResourceOp", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Destroys the temporary variable and returns its final value.
+ ///
+ ///
+ /// A reference to the temporary variable tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DestroyTemporaryVariable'.
+ ///
+ ///
+ /// Optional argument
+ /// Name of the temporary variable, usually the name of the matching
+ /// 'TemporaryVariable' op.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Sets output to the value of the Tensor pointed to by 'ref', then destroys
+ /// the temporary variable called 'var_name'.
+ /// All other uses of 'ref' *must* have executed before this op.
+ /// This is typically achieved by chaining the ref through each assign op, or by
+ /// using control dependencies.
+ ///
+ /// Outputs the final value of the tensor pointed to by 'ref'.
+ ///
+ public static Tensor destroy_temporary_variable (Tensor reference, string var_name, string name = "DestroyTemporaryVariable")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["var_name"] = var_name;
+ var op = _op_def_lib._apply_op_helper("DestroyTemporaryVariable", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns a diagonal tensor with given diagonal values.
+ ///
+ ///
+ /// Rank k tensor where k is at most 1.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Diag'.
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a diagonal, this operation returns a tensor with the diagonal and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of + /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + /// + /// output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik] and 0 everywhere else. + /// + /// For example: + /// + /// + /// # 'diagonal' is [1, 2, 3, 4] + /// tf.diag(diagonal) ==> [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// + /// + public static Tensor diag (Tensor diagonal, string name = "Diag") + { + var dict = new Dictionary(); + dict["diagonal"] = diagonal; + var op = _op_def_lib._apply_op_helper("Diag", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the diagonal part of the tensor. + /// + /// + /// Rank k tensor where k is even and not zero. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DiagPart'. + /// + /// + /// The extracted diagonal. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns a tensor with the diagonal part + /// of the input. The diagonal part is computed as follows: + /// + /// Assume input has dimensions [D1,..., Dk, D1,..., Dk], then the output is a + /// tensor of rank k with dimensions [D1,..., Dk] where: + /// + /// diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]. + /// + /// For example: + /// + /// + /// # 'input' is [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// + /// tf.diag_part(input) ==> [1, 2, 3, 4] + /// + /// + public static Tensor diag_part (Tensor input, string name = "DiagPart") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("DiagPart", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes Psi, the derivative of Lgamma (the log of the absolute value of + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Digamma'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Gamma(x)), element-wise. + /// + public static Tensor digamma (Tensor x, string name = "Digamma") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Digamma", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the grayscale dilation of 4-D input and 3-D filter tensors. + /// + /// + /// 4-D with shape [batch, in_height, in_width, depth]. + /// + /// + /// 3-D with shape [filter_height, filter_width, depth]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Dilation2D'. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the input + /// tensor. Must be: [1, stride_height, stride_width, 1]. + /// + /// + /// Optional argument + /// The input stride for atrous morphological dilation. Must be: + /// [1, rate_height, rate_width, 1]. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. 
+ /// + /// + /// 4-D with shape [batch, out_height, out_width, depth]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input tensor has shape [batch, in_height, in_width, depth] and the + /// filter tensor has shape [filter_height, filter_width, depth], i.e., each + /// input channel is processed independently of the others with its own structuring + /// function. The output tensor has shape + /// [batch, out_height, out_width, depth]. The spatial dimensions of the output + /// tensor depend on the padding algorithm. We currently only support the default + /// "NHWC" data_format. + /// + /// In detail, the grayscale morphological 2-D dilation is the max-sum correlation + /// (for consistency with conv2d, we use unmirrored filters): + /// + /// output[b, y, x, c] = + /// max_{dy, dx} input[b, + /// strides[1] * y + rates[1] * dy, + /// strides[2] * x + rates[2] * dx, + /// c] + + /// filter[dy, dx, c] + /// + /// Max-pooling is a special case when the filter has size equal to the pooling + /// kernel size and contains all zeros. + /// + /// Note on duality: The dilation of input by the filter is equal to the + /// negation of the erosion of -input by the reflected filter. + /// + public static Tensor dilation2d (Tensor input, Tensor filter, int[] strides, int[] rates, string padding, string name = "Dilation2D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["filter"] = filter; + dict["strides"] = strides; + dict["rates"] = rates; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("Dilation2D", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of morphological 2-D dilation with respect to the filter. + /// + /// + /// 4-D with shape [batch, in_height, in_width, depth]. + /// + /// + /// 3-D with shape [filter_height, filter_width, depth]. + /// + /// + /// 4-D with shape [batch, out_height, out_width, depth]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Dilation2DBackpropFilter'. + /// + /// + /// Optional argument + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: [1, stride_height, stride_width, 1]. + /// + /// + /// Optional argument + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: [1, rate_height, rate_width, 1]. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// 3-D with shape [filter_height, filter_width, depth]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor dilation2d_backprop_filter (Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name = "Dilation2DBackpropFilter") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["filter"] = filter; + dict["out_backprop"] = out_backprop; + dict["strides"] = strides; + dict["rates"] = rates; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("Dilation2DBackpropFilter", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of morphological 2-D dilation with respect to the input. + /// + /// + /// 4-D with shape [batch, in_height, in_width, depth]. + /// + /// + /// 3-D with shape [filter_height, filter_width, depth]. 
+ /// + /// + /// 4-D with shape [batch, out_height, out_width, depth]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Dilation2DBackpropInput'. + /// + /// + /// Optional argument + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: [1, stride_height, stride_width, 1]. + /// + /// + /// Optional argument + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: [1, rate_height, rate_width, 1]. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// 4-D with shape [batch, in_height, in_width, depth]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor dilation2d_backprop_input (Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name = "Dilation2DBackpropInput") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["filter"] = filter; + dict["out_backprop"] = out_backprop; + dict["strides"] = strides; + dict["rates"] = rates; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("Dilation2DBackpropInput", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x / y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Div'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Div supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor div (Tensor x, Tensor y, string name = "Div") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Div", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns 0 if the denominator is zero. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// + /// *NOTE*: DivNoNan supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor div_no_nan (Tensor x, Tensor y, string name = "DivNoNan") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, keywords: dict); + return op.output; + } + + /// + /// Draw bounding boxes on a batch of images. + /// + /// + /// 4-D with shape [batch, height, width, depth]. A batch of images. + /// + /// + /// 3-D with shape [batch, num_bounding_boxes, 4] containing bounding + /// boxes. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DrawBoundingBoxes'. + /// + /// + /// 4-D with the same shape as images. The batch of input images with + /// bounding boxes drawn on the images. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Outputs a copy of images but draws on top of the pixels zero or more bounding + /// boxes specified by the locations in boxes. 
The coordinates of each
+ /// bounding box in boxes are encoded as [y_min, x_min, y_max, x_max]. The
+ /// bounding box coordinates are floats in [0.0, 1.0] relative to the width and
+ /// height of the underlying image.
+ ///
+ /// For example, if an image is 100 x 200 pixels (height x width) and the bounding
+ /// box is [0.1, 0.2, 0.5, 0.9], the upper-left and bottom-right coordinates of
+ /// the bounding box will be (40, 10) to (180, 50) (in (x,y) coordinates).
+ ///
+ /// Parts of the bounding box may fall outside the image.
+ ///
+ public static Tensor draw_bounding_boxes (Tensor images, Tensor boxes, string name = "DrawBoundingBoxes")
+ {
+ var dict = new Dictionary();
+ dict["images"] = images;
+ dict["boxes"] = boxes;
+ var op = _op_def_lib._apply_op_helper("DrawBoundingBoxes", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Partitions data into num_partitions tensors using indices from partitions.
+ ///
+ ///
+ ///
+ ///
+ /// Any shape. Indices in the range [0, num_partitions).
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DynamicPartition'.
+ ///
+ ///
+ /// Optional argument
+ /// The number of partitions to output.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// For each index tuple js of size partitions.ndim, the slice data[js, ...]
+ /// becomes part of outputs[partitions[js]]. The slices with partitions[js] = i
+ /// are placed in outputs[i] in lexicographic order of js, and the first
+ /// dimension of outputs[i] is the number of entries in partitions equal to i.
+ /// In detail,
+ ///
+ ///
+ /// outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+ ///
+ /// outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+ ///
+ ///
+ /// data.shape must start with partitions.shape.
+ ///
+ /// For example:
+ ///
+ ///
+ /// # Scalar partitions.
+ /// partitions = 1
+ /// num_partitions = 2
+ /// data = [10, 20]
+ /// outputs[0] = [] # Empty with shape [0, 2]
+ /// outputs[1] = [[10, 20]]
+ ///
+ /// # Vector partitions.
+ /// partitions = [0, 0, 1, 1, 0]
+ /// num_partitions = 2
+ /// data = [10, 20, 30, 40, 50]
+ /// outputs[0] = [10, 20, 50]
+ /// outputs[1] = [30, 40]
+ ///
+ ///
+ /// See dynamic_stitch for an example of how to merge partitions back.
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
+ /// </div>
+ ///
+ public static Tensor[] dynamic_partition (Tensor data, Tensor partitions, int num_partitions, string name = "DynamicPartition")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["partitions"] = partitions;
+ dict["num_partitions"] = num_partitions;
+ var op = _op_def_lib._apply_op_helper("DynamicPartition", name: name, keywords: dict);
+ int _idx = 0;
+ var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
+ return outputs;
+ }
+
+ ///
+ /// Interleave the values from the data tensors into a single tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DynamicStitch'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Builds a merged tensor such that
+ ///
+ ///
+ /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+ ///
+ ///
+ /// For example, if each indices[m] is scalar or vector, we have
+ ///
+ ///
+ /// # Scalar indices:
+ /// merged[indices[m], ...] = data[m][...]
+ ///
+ /// # Vector indices:
+ /// merged[indices[m][i], ...] = data[m][i, ...]
+ ///
+ ///
+ /// Each data[i].shape must start with the corresponding indices[i].shape,
+ /// and the rest of data[i].shape must be constant w.r.t. i. That is, we
+ /// must have data[i].shape = indices[i].shape + constant. In terms of this
+ /// constant, the output shape is
+ ///
+ /// merged.shape = [max(indices) + 1] + constant
+ ///
+ /// Values are merged in order, so if an index appears in both indices[m][i] and
+ /// indices[n][j] for (m,i) < (n,j) the slice data[n][j] will appear in the
+ /// merged result. If you do not need this guarantee, ParallelDynamicStitch might
+ /// perform better on some devices.
+ ///
+ /// For example:
+ ///
+ ///
+ /// indices[0] = 6
+ /// indices[1] = [4, 1]
+ /// indices[2] = [[5, 2], [0, 3]]
+ /// data[0] = [61, 62]
+ /// data[1] = [[41, 42], [11, 12]]
+ /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+ /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+ /// [51, 52], [61, 62]]
+ ///
+ ///
+ /// This method can be used to merge partitions created by dynamic_partition
+ /// as illustrated in the following example:
+ ///
+ ///
+ /// # Apply function (increments x_i) on elements for which a certain condition
+ /// # applies (x_i != -1 in this example).
+ /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+ /// condition_mask=tf.not_equal(x,tf.constant(-1.))
+ /// partitioned_data = tf.dynamic_partition(
+ /// x, tf.cast(condition_mask, tf.int32) , 2)
+ /// partitioned_data[1] = partitioned_data[1] + 1.0
+ /// condition_indices = tf.dynamic_partition(
+ /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
+ /// x = tf.dynamic_stitch(condition_indices, partitioned_data)
+ /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+ /// # unchanged.
+ ///
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+ /// </div>
+ ///
+ public static Tensor dynamic_stitch (Tensor[] indices, Tensor[] data, string name = "DynamicStitch")
+ {
+ var dict = new Dictionary();
+ dict["indices"] = indices;
+ dict["data"] = data;
+ var op = _op_def_lib._apply_op_helper("DynamicStitch", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the (possibly normalized) Levenshtein Edit Distance.
+ ///
+ ///
+ /// The indices of the hypothesis list SparseTensor.
+ /// This is an N x R int64 matrix.
+ ///
+ ///
+ /// The values of the hypothesis list SparseTensor.
+ /// This is an N-length vector.
+ ///
+ ///
+ /// The shape of the hypothesis list SparseTensor.
+ /// This is an R-length vector.
+ ///
+ ///
+ /// The indices of the truth list SparseTensor.
+ /// This is an M x R int64 matrix.
+ ///
+ ///
+ /// The values of the truth list SparseTensor.
+ /// This is an M-length vector.
+ ///
+ ///
+ /// truth indices, vector.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EditDistance'.
+ ///
+ ///
+ /// boolean (if true, edit distances are normalized by length of truth).
+ ///
+ /// The output is:
+ ///
+ ///
+ /// A dense float tensor with rank R - 1.
+ ///
+ /// For the example input:
+ ///
+ /// // hypothesis represents a 2x1 matrix with variable-length values:
+ /// // (0,0) = ["a"]
+ /// // (1,0) = ["b"]
+ /// hypothesis_indices = [[0, 0, 0],
+ /// [1, 0, 0]]
+ /// hypothesis_values = ["a", "b"]
+ /// hypothesis_shape = [2, 1, 1]
+ ///
+ /// // truth represents a 2x2 matrix with variable-length values:
+ /// // (0,0) = []
+ /// // (0,1) = ["a"]
+ /// // (1,0) = ["b", "c"]
+ /// // (1,1) = ["a"]
+ /// truth_indices = [[0, 1, 0],
+ /// [1, 0, 0],
+ /// [1, 0, 1],
+ /// [1, 1, 0]]
+ /// truth_values = ["a", "b", "c", "a"]
+ /// truth_shape = [2, 2, 2]
+ /// normalize = true
+ ///
+ /// The output will be:
+ ///
+ /// // output is a 2x2 matrix with edit distances normalized by truth lengths.
+ /// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis
+ /// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The inputs are variable-length sequences provided by SparseTensors
+ /// (hypothesis_indices, hypothesis_values, hypothesis_shape)
+ /// and
+ /// (truth_indices, truth_values, truth_shape).
+ ///
+ public static Tensor edit_distance (Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool? normalize = null, string name = "EditDistance")
+ {
+ var dict = new Dictionary();
+ dict["hypothesis_indices"] = hypothesis_indices;
+ dict["hypothesis_values"] = hypothesis_values;
+ dict["hypothesis_shape"] = hypothesis_shape;
+ dict["truth_indices"] = truth_indices;
+ dict["truth_values"] = truth_values;
+ dict["truth_shape"] = truth_shape;
+ if (normalize.HasValue)
+ dict["normalize"] = normalize.Value;
+ var op = _op_def_lib._apply_op_helper("EditDistance", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes exponential linear: exp(features) - 1 if features < 0, features otherwise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+ /// ](http://arxiv.org/abs/1511.07289)
+ ///
+ public static Tensor elu (Tensor features, string name = "Elu")
+ {
+ var dict = new Dictionary();
+ dict["features"] = features;
+ var op = _op_def_lib._apply_op_helper("Elu", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes gradients for the exponential linear (Elu) operation.
+ ///
+ ///
+ /// The backpropagated gradients to the corresponding Elu operation.
+ ///
+ ///
+ /// The outputs of the corresponding Elu operation.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EluGrad'.
+ ///
+ ///
+ /// The gradients: gradients * (outputs + 1) if outputs < 0,
+ /// gradients otherwise.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
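+ /// As an illustrative example (the values are assumptions): for
+ /// outputs = [-0.5, 2.0] and gradients = [1.0, 1.0] the result is
+ /// [0.5, 1.0], since only the negative output selects gradients * (outputs + 1).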
+ ///
+ public static Tensor elu_grad (Tensor gradients, Tensor outputs, string name = "EluGrad")
+ {
+ var dict = new Dictionary();
+ dict["gradients"] = gradients;
+ dict["outputs"] = outputs;
+ var op = _op_def_lib._apply_op_helper("EluGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates a tensor with the given shape.
+ ///
+ /// This operation creates a tensor of the given shape and dtype.
+ ///
+ ///
+ /// 1-D. Represents the shape of the output tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Empty'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
+ ///
+ ///
+ /// A Tensor of type T.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor empty (Tensor shape, TF_DataType dtype, bool? init = null, string name = "Empty")
+ {
+ var dict = new Dictionary();
+ dict["shape"] = shape;
+ dict["dtype"] = dtype;
+ if (init.HasValue)
+ dict["init"] = init.Value;
+ var op = _op_def_lib._apply_op_helper("Empty", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates and returns an empty tensor list.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EmptyTensorList'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// All list elements must be tensors of dtype element_dtype and shape compatible
+ /// with element_shape.
+ ///
+ /// handle: an empty tensor list.
+ /// element_dtype: the type of elements in the list.
+ /// element_shape: a shape compatible with that of elements in the list.
+ ///
+ public static Tensor empty_tensor_list (Tensor element_shape, TF_DataType element_dtype, string name = "EmptyTensorList")
+ {
+ var dict = new Dictionary();
+ dict["element_shape"] = element_shape;
+ dict["element_dtype"] = element_dtype;
+ var op = _op_def_lib._apply_op_helper("EmptyTensorList", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Encode strings into web-safe base64 format.
+ ///
+ ///
+ /// Strings to be encoded.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EncodeBase64'.
+ ///
+ ///
+ /// Bool whether padding is applied at the ends.
+ ///
+ ///
+ /// Input strings encoded in base64.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Refer to the following article for more information on base64 format:
+ /// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
+ /// end so that the encoded string has a length that is a multiple of 4. See the
+ /// Padding section of the link above.
+ ///
+ /// Web-safe means that the encoder uses - and _ instead of + and /.
+ ///
+ public static Tensor encode_base64 (Tensor input, bool? pad = null, string name = "EncodeBase64")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ if (pad.HasValue)
+ dict["pad"] = pad.Value;
+ var op = _op_def_lib._apply_op_helper("EncodeBase64", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// JPEG-encode an image.
+ ///
+ ///
+ /// 3-D with shape [height, width, channels].
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EncodeJpeg'. + /// + /// + /// Per pixel image format. + /// + /// + /// Quality of the compression from 0 to 100 (higher is better and slower). + /// + /// + /// If True, create a JPEG that loads progressively (coarse to fine). + /// + /// + /// If True, spend CPU/RAM to reduce size with no quality change. + /// + /// + /// See http://en.wikipedia.org/wiki/Chroma_subsampling. + /// + /// + /// Unit used to specify x_density and y_density: + /// pixels per inch ('in') or centimeter ('cm'). + /// + /// + /// Horizontal pixels per density unit. + /// + /// + /// Vertical pixels per density unit. + /// + /// + /// If not empty, embed this XMP metadata in the image header. + /// + /// + /// 0-D. JPEG-encoded image. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// image is a 3-D uint8 Tensor of shape [height, width, channels]. + /// + /// The attr format can be used to override the color format of the encoded + /// output. Values can be: + /// + /// * '': Use a default format based on the number of channels in the image. + /// * grayscale: Output a grayscale JPEG image. The channels dimension + /// of image must be 1. + /// * rgb: Output an RGB JPEG image. The channels dimension + /// of image must be 3. + /// + /// If format is not specified or is the empty string, a default format is picked + /// in function of the number of channels in image: + /// + /// * 1: Output a grayscale image. + /// * 3: Output an RGB image. + /// + public static Tensor encode_jpeg (Tensor image, string format = null, int? quality = null, bool? progressive = null, bool? optimize_size = null, bool? chroma_downsampling = null, string density_unit = null, int? x_density = null, int? y_density = null, string xmp_metadata = null, string name = "EncodeJpeg") + { + var dict = new Dictionary(); + dict["image"] = image; + if (format != null) + dict["format"] = format; + if (quality.HasValue) + dict["quality"] = quality.Value; + if (progressive.HasValue) + dict["progressive"] = progressive.Value; + if (optimize_size.HasValue) + dict["optimize_size"] = optimize_size.Value; + if (chroma_downsampling.HasValue) + dict["chroma_downsampling"] = chroma_downsampling.Value; + if (density_unit != null) + dict["density_unit"] = density_unit; + if (x_density.HasValue) + dict["x_density"] = x_density.Value; + if (y_density.HasValue) + dict["y_density"] = y_density.Value; + if (xmp_metadata != null) + dict["xmp_metadata"] = xmp_metadata; + var op = _op_def_lib._apply_op_helper("EncodeJpeg", name: name, keywords: dict); + return op.output; + } + + /// + /// PNG-encode an image. + /// + /// + /// 3-D with shape [height, width, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EncodePng'. + /// + /// + /// Compression level. + /// + /// + /// 0-D. PNG-encoded image. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// image is a 3-D uint8 or uint16 Tensor of shape [height, width, channels] + /// where channels is: + /// + /// * 1: for grayscale. + /// * 2: for grayscale + alpha. + /// * 3: for RGB. + /// * 4: for RGBA. + /// + /// The ZLIB compression level, compression, can be -1 for the PNG-encoder + /// default or a value from 0 to 9. 
9 is the highest compression level, generating
+ /// the smallest output, but is slower.
+ ///
+ public static Tensor encode_png (Tensor image, int? compression = null, string name = "EncodePng")
+ {
+ var dict = new Dictionary();
+ dict["image"] = image;
+ if (compression.HasValue)
+ dict["compression"] = compression.Value;
+ var op = _op_def_lib._apply_op_helper("EncodePng", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// The op serializes protobuf messages provided in the input tensors.
+ ///
+ ///
+ /// Tensor of int32 with shape [batch_shape, len(field_names)].
+ ///
+ ///
+ /// List of tensors containing values for the corresponding field.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EncodeProto'.
+ ///
+ ///
+ /// Optional argument
+ /// List of strings containing proto field names.
+ ///
+ ///
+ /// Optional argument
+ /// Name of the proto message type to encode.
+ ///
+ ///
+ ///
+ ///
+ /// Tensor of serialized protos with shape batch_shape.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The types of the tensors in values must match the schema for the
+ /// fields specified in field_names. All the tensors in values must
+ /// have a common shape prefix, *batch_shape*.
+ ///
+ /// The sizes tensor specifies repeat counts for each field. The repeat
+ /// count (last dimension) of each tensor in values must be greater
+ /// than or equal to the corresponding repeat count in sizes.
+ ///
+ /// A message_type name must be provided to give context for the field
+ /// names. The actual message descriptor can be looked up either in the
+ /// linked-in descriptor pool or a filename provided by the caller using
+ /// the descriptor_source attribute.
+ ///
+ /// The descriptor_source attribute selects a source of protocol
+ /// descriptors to consult when looking up message_type. This may be a
+ /// filename containing a serialized FileDescriptorSet message,
+ /// or the special value local://, in which case only descriptors linked
+ /// into the code will be searched; the filename can be on any filesystem
+ /// accessible to TensorFlow.
+ ///
+ /// You can build a descriptor_source file using the --descriptor_set_out
+ /// and --include_imports options to the protocol compiler protoc.
+ ///
+ /// The local:// database only covers descriptors linked into the
+ /// code via C++ libraries, not Python imports. You can link in a proto descriptor
+ /// by creating a cc_library target with alwayslink=1.
+ ///
+ /// There are a few special cases in the value mapping:
+ ///
+ /// Submessage and group fields must be pre-serialized as TensorFlow strings.
+ ///
+ /// TensorFlow lacks support for unsigned int64s, so they must be
+ /// represented as tf.int64 with the same twos-complement bit pattern
+ /// (the obvious way).
+ ///
+ /// Unsigned int32 values can be represented exactly with tf.int64, or
+ /// with sign wrapping if the input is of type tf.int32.
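+ ///
+ /// A minimal usage sketch (hypothetical field name "id" and message type
+ /// "MyMessage"; it assumes the corresponding descriptor is linked into the
+ /// binary and that tf.constant can build the small tensors shown):
+ ///
+ /// var sizes = tf.constant(new int[,] { { 1 } }); // one value for field "id"
+ /// var values = new Tensor[] { tf.constant(new long[,] { { 42L } }) };
+ /// var serialized = gen_ops.encode_proto(sizes, values, new[] { "id" }, "MyMessage");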
+ /// + public static Tensor encode_proto (Tensor sizes, Tensor[] values, string[] field_names, string message_type, string descriptor_source = null, string name = "EncodeProto") + { + var dict = new Dictionary(); + dict["sizes"] = sizes; + dict["values"] = values; + dict["field_names"] = field_names; + dict["message_type"] = message_type; + if (descriptor_source != null) + dict["descriptor_source"] = descriptor_source; + var op = _op_def_lib._apply_op_helper("EncodeProto", name: name, keywords: dict); + return op.output; + } + + /// + /// Encode audio data using the WAV file format. + /// + /// + /// 2-D with shape [length, channels]. + /// + /// + /// Scalar containing the sample frequency. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EncodeWav'. + /// + /// + /// 0-D. WAV-encoded file contents. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation will generate a string suitable to be saved out to create a .wav + /// audio file. It will be encoded in the 16-bit PCM format. It takes in float + /// values in the range -1.0f to 1.0f, and any outside that value will be clamped to + /// that range. + /// + /// audio is a 2-D float Tensor of shape [length, channels]. + /// sample_rate is a scalar Tensor holding the rate to use (e.g. 44100). + /// + public static Tensor encode_wav (Tensor audio, Tensor sample_rate, string name = "EncodeWav") + { + var dict = new Dictionary(); + dict["audio"] = audio; + dict["sample_rate"] = sample_rate; + var op = _op_def_lib._apply_op_helper("EncodeWav", name: name, keywords: dict); + return op.output; + } + + /// + /// Ensures that the tensor's shape matches the expected shape. + /// + /// + /// A tensor, whose shape is to be validated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'EnsureShape'. + /// + /// + /// Optional argument + /// The expected (possibly partially specified) shape of the input tensor. + /// + /// + /// A tensor with the same shape and contents as the input tensor or value. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Raises an error if the input tensor's shape does not match the specified shape. + /// Returns the input tensor otherwise. + /// + public static Tensor ensure_shape (Tensor input, TensorShape shape, string name = "EnsureShape") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("EnsureShape", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates or finds a child frame, and makes data available to the child frame. + /// + /// + /// The tensor to be made available to the child frame. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Enter'. + /// + /// + /// Optional argument + /// The name of the child frame. + /// + /// + /// If true, the output is constant within the child frame. + /// + /// + /// The number of iterations allowed to run in parallel. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op is used together with Exit to create loops in the graph. + /// The unique frame_name is used by the Executor to identify frames. 
If + /// is_constant is true, output is a constant in the child frame; otherwise + /// it may be changed in the child frame. At most parallel_iterations iterations + /// are run in parallel in the child frame. + /// + public static Tensor enter (Tensor data, string frame_name, bool? is_constant = null, int? parallel_iterations = null, string name = "Enter") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["frame_name"] = frame_name; + if (is_constant.HasValue) + dict["is_constant"] = is_constant.Value; + if (parallel_iterations.HasValue) + dict["parallel_iterations"] = parallel_iterations.Value; + var op = _op_def_lib._apply_op_helper("Enter", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x == y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Equal'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Equal supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor equal (Tensor x, Tensor y, string name = "Equal") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Equal", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the Gauss error function of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Erf'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor erf (Tensor x, string name = "Erf") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Erf", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the complementary error function of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Erfc'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor erfc (Tensor x, string name = "Erfc") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Erfc", name: name, keywords: dict); + return op.output; + } + + /// + /// Exits the current frame to its parent frame. + /// + /// + /// The tensor to be made available to the parent frame. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Exit'. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Exit makes its input data available to the parent frame. + /// + public static Tensor exit (Tensor data, string name = "Exit") + { + var dict = new Dictionary(); + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("Exit", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes exponential of x element-wise. \\(y = e^x\\). + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Exp'. 
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor exp (Tensor x, string name = "Exp")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Exp", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Inserts a dimension of 1 into a tensor's shape.
+ ///
+ ///
+ ///
+ ///
+ /// 0-D (scalar). Specifies the dimension index at which to
+ /// expand the shape of input. Must be in the range
+ /// [-rank(input) - 1, rank(input)].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ExpandDims'.
+ ///
+ ///
+ /// Contains the same data as input, but its shape has an additional
+ /// dimension of size 1 added.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor input, this operation inserts a dimension of 1 at the
+ /// dimension index axis of input's shape. The dimension index axis starts at
+ /// zero; if you specify a negative number for axis it is counted backward from
+ /// the end.
+ ///
+ /// This operation is useful if you want to add a batch dimension to a single
+ /// element. For example, if you have a single image of shape [height, width,
+ /// channels], you can make it a batch of 1 image with expand_dims(image, 0),
+ /// which will make the shape [1, height, width, channels].
+ ///
+ /// Other examples:
+ ///
+ ///
+ /// # 't' is a tensor of shape [2]
+ /// shape(expand_dims(t, 0)) ==> [1, 2]
+ /// shape(expand_dims(t, 1)) ==> [2, 1]
+ /// shape(expand_dims(t, -1)) ==> [2, 1]
+ ///
+ /// # 't2' is a tensor of shape [2, 3, 5]
+ /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+ /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+ /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+ ///
+ ///
+ /// This operation requires that:
+ ///
+ /// -1-input.dims() <= dim <= input.dims()
+ ///
+ /// This operation is related to squeeze(), which removes dimensions of
+ /// size 1.
+ ///
+ public static Tensor expand_dims (Tensor input, Tensor dim, string name = "ExpandDims")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["dim"] = dim;
+ var op = _op_def_lib._apply_op_helper("ExpandDims", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes exponential of x - 1 element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Expm1'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// I.e., \\(y = (\exp x) - 1\\).
+ ///
+ public static Tensor expm1 (Tensor x, string name = "Expm1")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Expm1", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Extracts a glimpse from the input tensor.
+ ///
+ ///
+ /// A 4-D float tensor of shape [batch_size, height, width, channels].
+ ///
+ ///
+ /// A 1-D tensor of 2 elements containing the size of the glimpses
+ /// to extract. The glimpse height must be specified first, followed
+ /// by the glimpse width.
+ ///
+ ///
+ /// A 2-D integer tensor of shape [batch_size, 2] containing
+ /// the y, x locations of the center of each window.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ExtractGlimpse'.
+ ///
+ ///
+ /// indicates if the offset coordinates are centered relative to
+ /// the image, in which case the (0, 0) offset is relative to the center
+ /// of the input images. If false, the (0,0) offset corresponds to the
+ /// upper left corner of the input images.
+ ///
+ ///
+ /// indicates if the offset coordinates are normalized.
+ ///
+ ///
+ /// indicates if the noise should be generated using a
+ /// uniform distribution or a Gaussian distribution.
+ ///
+ ///
+ /// A tensor representing the glimpses [batch_size,
+ /// glimpse_height, glimpse_width, channels].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Returns a set of windows called glimpses extracted at location
+ /// offsets from the input tensor. If the windows only partially
+ /// overlap the inputs, the non-overlapping areas will be filled with
+ /// random noise.
+ ///
+ /// The result is a 4-D tensor of shape [batch_size, glimpse_height,
+ /// glimpse_width, channels]. The channels and batch dimensions are the
+ /// same as those of the input tensor. The height and width of the output
+ /// windows are specified in the size parameter.
+ ///
+ /// The arguments normalized and centered control how the windows are built:
+ ///
+ /// * If the coordinates are normalized but not centered, 0.0 and 1.0
+ /// correspond to the minimum and maximum of each height and width
+ /// dimension.
+ /// * If the coordinates are both normalized and centered, they range from
+ /// -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
+ /// left corner, the lower right corner is located at (1.0, 1.0) and the
+ /// center is at (0, 0).
+ /// * If the coordinates are not normalized they are interpreted as
+ /// numbers of pixels.
+ ///
+ public static Tensor extract_glimpse (Tensor input, Tensor size, Tensor offsets, bool? centered = null, bool? normalized = null, bool? uniform_noise = null, string name = "ExtractGlimpse")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["size"] = size;
+ dict["offsets"] = offsets;
+ if (centered.HasValue)
+ dict["centered"] = centered.Value;
+ if (normalized.HasValue)
+ dict["normalized"] = normalized.Value;
+ if (uniform_noise.HasValue)
+ dict["uniform_noise"] = uniform_noise.Value;
+ var op = _op_def_lib._apply_op_helper("ExtractGlimpse", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Extract patches from images and put them in the "depth" output dimension.
+ ///
+ ///
+ /// 4-D Tensor with shape [batch, in_rows, in_cols, depth].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ExtractImagePatches'.
+ ///
+ ///
+ /// Optional argument
+ /// The size of the sliding window for each dimension of images.
+ ///
+ ///
+ /// Optional argument
+ /// 1-D of length 4. How far the centers of two consecutive patches are in
+ /// the images. Must be: [1, stride_rows, stride_cols, 1].
+ ///
+ ///
+ /// Optional argument
+ /// 1-D of length 4. Must be: [1, rate_rows, rate_cols, 1]. This is the
+ /// input stride, specifying how far two consecutive patch samples are in the
+ /// input. Equivalent to extracting patches with
+ /// patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1), followed by
+ /// subsampling them spatially by a factor of rates.
This is equivalent to + /// rate in dilated (a.k.a. Atrous) convolutions. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// We specify the size-related attributes as: + /// + /// + /// ksizes = [1, ksize_rows, ksize_cols, 1] + /// strides = [1, strides_rows, strides_cols, 1] + /// rates = [1, rates_rows, rates_cols, 1] + /// + /// + /// + /// 4-D Tensor with shape [batch, out_rows, out_cols, ksize_rows * + /// ksize_cols * depth] containing image patches with size + /// ksize_rows x ksize_cols x depth vectorized in the "depth" dimension. Note + /// out_rows and out_cols are the dimensions of the output patches. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor extract_image_patches (Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string name = "ExtractImagePatches") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["ksizes"] = ksizes; + dict["strides"] = strides; + dict["rates"] = rates; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("ExtractImagePatches", name: name, keywords: dict); + return op.output; + } + + /// + /// Extract the shape information of a JPEG-encoded image. + /// + /// + /// 0-D. The JPEG-encoded image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ExtractJpegShape'. + /// + /// + /// (Optional) The output type of the operation (int32 or int64). + /// Defaults to int32. + /// + /// + /// 1-D. The image shape with format [height, width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op only parses the image header, so it is much faster than DecodeJpeg. + /// + public static Tensor extract_jpeg_shape (Tensor contents, TF_DataType? output_type = null, string name = "ExtractJpegShape") + { + var dict = new Dictionary(); + dict["contents"] = contents; + if (output_type.HasValue) + dict["output_type"] = output_type.Value; + var op = _op_def_lib._apply_op_helper("ExtractJpegShape", name: name, keywords: dict); + return op.output; + } + + /// + /// Fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FFT'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most + /// dimension of input is replaced with its 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fft + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the 1-dimensional discrete Fourier transform over the inner-most + /// dimension of input. + /// + public static Tensor f_f_t (Tensor input, string name = "FFT") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("FFT", name: name, keywords: dict); + return op.output; + } + + /// + /// 2D fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FFT2D'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most 2 + /// dimensions of input are replaced with their 2D Fourier transform. 
+ /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fft2 + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the 2-dimensional discrete Fourier transform over the inner-most + /// 2 dimensions of input. + /// + public static Tensor f_f_t2d (Tensor input, string name = "FFT2D") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("FFT2D", name: name, keywords: dict); + return op.output; + } + + /// + /// 3D fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FFT3D'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most 3 + /// dimensions of input are replaced with their 3D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fftn with 3 dimensions. + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the 3-dimensional discrete Fourier transform over the inner-most 3 + /// dimensions of input. + /// + public static Tensor f_f_t3d (Tensor input, string name = "FFT3D") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("FFT3D", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements in first-in first-out order. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FIFOQueue'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor f_i_f_o_queue (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueue") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("FIFOQueue", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements in first-in first-out order. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FIFOQueueV2'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. 
The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor f_i_f_o_queue_v2 (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueueV2") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("FIFOQueueV2", name: name, keywords: dict); + return op.output; + } + + /// + /// This op is used as a placeholder in If branch functions. It doesn't provide a + /// valid output when run, so must either be removed (e.g. replaced with a + /// function input) or guaranteed not to be used (e.g. if mirroring an + /// intermediate output needed for the gradient computation of the other branch). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeParam'. + /// + /// + /// Optional argument + /// The type of the output. + /// + /// + /// Optional argument + /// The purported shape of the output. This is only used for shape inference; + /// the output will not necessarily have this shape. Can be a partial shape. + /// + /// + /// \"Fake\" output value. This should not be consumed by another op. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor fake_param (TF_DataType dtype, TensorShape shape, string name = "FakeParam") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("FakeParam", name: name, keywords: dict); + return op.output; + } + + /// + /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxArgs'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Attributes [min; max] define the clamping range for the inputs data. + /// inputs values are quantized into the quantization range ([0; 2^num_bits - 1] + /// when narrow_range is false and [1; 2^num_bits - 1] when it is true) and + /// then de-quantized and output as floats in [min; max] interval. + /// num_bits is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Quantization is called fake since the output is still in floating point. 
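+ ///
+ /// A minimal sketch (assuming activations is a float Tensor already in the graph):
+ ///
+ /// // Simulate 8-bit quantization over [-6, 6]; the output remains float.
+ /// var fq = gen_ops.fake_quant_with_min_max_args(activations, min: -6.0f, max: 6.0f, num_bits: 8);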
+ /// + public static Tensor fake_quant_with_min_max_args (Tensor inputs, float? min = null, float? max = null, int? num_bits = null, bool? narrow_range = null, string name = "FakeQuantWithMinMaxArgs") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + if (min.HasValue) + dict["min"] = min.Value; + if (max.HasValue) + dict["max"] = max.Value; + if (num_bits.HasValue) + dict["num_bits"] = num_bits.Value; + if (narrow_range.HasValue) + dict["narrow_range"] = narrow_range.Value; + var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgs", name: name, keywords: dict); + return op.output; + } + + /// + /// Compute gradients for a FakeQuantWithMinMaxArgs operation. + /// + /// + /// Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. + /// + /// + /// Values passed as inputs to the FakeQuantWithMinMaxArgs operation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxArgsGradient'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + /// gradients * (inputs >= min && inputs <= max). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor fake_quant_with_min_max_args_gradient (Tensor gradients, Tensor inputs, float? min = null, float? max = null, int? num_bits = null, bool? narrow_range = null, string name = "FakeQuantWithMinMaxArgsGradient") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["inputs"] = inputs; + if (min.HasValue) + dict["min"] = min.Value; + if (max.HasValue) + dict["max"] = max.Value; + if (num_bits.HasValue) + dict["num_bits"] = num_bits.Value; + if (narrow_range.HasValue) + dict["narrow_range"] = narrow_range.Value; + var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name: name, keywords: dict); + return op.output; + } + + /// + /// Fake-quantize the 'inputs' tensor of type float via global float scalars min + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxVars'. + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// and max to 'outputs' tensor of same shape as inputs. + /// + /// [min; max] define the clamping range for the inputs data. + /// inputs values are quantized into the quantization range ([0; 2^num_bits - 1] + /// when narrow_range is false and [1; 2^num_bits - 1] when it is true) and + /// then de-quantized and output as floats in [min; max] interval. + /// num_bits is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// This operation has a gradient and thus allows for training min and max + /// values. + /// + public static Tensor fake_quant_with_min_max_vars (Tensor inputs, Tensor min, Tensor max, int? num_bits = null, bool? 
narrow_range = null, string name = "FakeQuantWithMinMaxVars")
+ {
+ var dict = new Dictionary();
+ dict["inputs"] = inputs;
+ dict["min"] = min;
+ dict["max"] = max;
+ if (num_bits.HasValue)
+ dict["num_bits"] = num_bits.Value;
+ if (narrow_range.HasValue)
+ dict["narrow_range"] = narrow_range.Value;
+ var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVars", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Compute gradients for a FakeQuantWithMinMaxVars operation.
+ ///
+ ///
+ /// Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
+ ///
+ ///
+ /// Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+ /// min, max: Quantization interval, scalar floats.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxVarsGradient'.
+ ///
+ ///
+ /// The bitwidth of the quantization; between 2 and 8, inclusive.
+ ///
+ ///
+ /// Whether to quantize into 2^num_bits - 1 distinct values.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// backprops_wrt_input : Backpropagated gradients w.r.t. inputs:
+ /// gradients * (inputs >= min && inputs <= max).
+ /// backprop_wrt_min : Backpropagated gradients w.r.t. min parameter:
+ /// sum(gradients * (inputs < min)).
+ /// backprop_wrt_max : Backpropagated gradients w.r.t. max parameter:
+ /// sum(gradients * (inputs > max)).
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor backprops_wrt_input, Tensor backprop_wrt_min, Tensor backprop_wrt_max) fake_quant_with_min_max_vars_gradient (Tensor gradients, Tensor inputs, Tensor min, Tensor max, int? num_bits = null, bool? narrow_range = null, string name = "FakeQuantWithMinMaxVarsGradient")
+ {
+ var dict = new Dictionary();
+ dict["gradients"] = gradients;
+ dict["inputs"] = inputs;
+ dict["min"] = min;
+ dict["max"] = max;
+ if (num_bits.HasValue)
+ dict["num_bits"] = num_bits.Value;
+ if (narrow_range.HasValue)
+ dict["narrow_range"] = narrow_range.Value;
+ var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name: name, keywords: dict);
+ int _idx = 0;
+ var backprops_wrt_input = op.outputs[_idx++];
+ var backprop_wrt_min = op.outputs[_idx++];
+ var backprop_wrt_max = op.outputs[_idx++];
+ return (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max);
+ }
+
+ ///
+ /// Fake-quantize the 'inputs' tensor of type float and one of the shapes: [d],
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxVarsPerChannel'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// [b, d], [b, h, w, d] via per-channel floats min and max of shape [d]
+ /// to 'outputs' tensor of same shape as inputs.
+ ///
+ /// [min; max] define the clamping range for the inputs data.
+ /// inputs values are quantized into the quantization range ([0; 2^num_bits - 1]
+ /// when narrow_range is false and [1; 2^num_bits - 1] when it is true) and
+ /// then de-quantized and output as floats in [min; max] interval.
+ /// num_bits is the bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ /// This operation has a gradient and thus allows for training min and max
+ /// values.
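+ ///
+ /// A minimal sketch (hypothetical min_var and max_var, float tensors of shape [d]
+ /// matching the last dimension of inputs):
+ ///
+ /// var fq = gen_ops.fake_quant_with_min_max_vars_per_channel(inputs, min_var, max_var, num_bits: 8);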
+ ///
+ public static Tensor fake_quant_with_min_max_vars_per_channel (Tensor inputs, Tensor min, Tensor max, int? num_bits = null, bool? narrow_range = null, string name = "FakeQuantWithMinMaxVarsPerChannel")
+ {
+ var dict = new Dictionary();
+ dict["inputs"] = inputs;
+ dict["min"] = min;
+ dict["max"] = max;
+ if (num_bits.HasValue)
+ dict["num_bits"] = num_bits.Value;
+ if (narrow_range.HasValue)
+ dict["narrow_range"] = narrow_range.Value;
+ var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
+ ///
+ ///
+ /// Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
+ /// shape one of: [d], [b, d], [b, h, w, d].
+ ///
+ ///
+ /// Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
+ /// same as gradients.
+ /// min, max: Quantization interval, floats of shape [d].
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQuantWithMinMaxVarsPerChannelGradient'.
+ ///
+ ///
+ /// The bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ ///
+ /// Whether to quantize into 2^num_bits - 1 distinct values.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// backprops_wrt_input : Backpropagated gradients w.r.t. inputs, shape same as
+ /// inputs:
+ /// gradients * (inputs >= min && inputs <= max).
+ /// backprop_wrt_min : Backpropagated gradients w.r.t. min parameter, shape [d]:
+ /// sum_per_d(gradients * (inputs < min)).
+ /// backprop_wrt_max : Backpropagated gradients w.r.t. max parameter, shape [d]:
+ /// sum_per_d(gradients * (inputs > max)).
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor backprops_wrt_input, Tensor backprop_wrt_min, Tensor backprop_wrt_max) fake_quant_with_min_max_vars_per_channel_gradient (Tensor gradients, Tensor inputs, Tensor min, Tensor max, int? num_bits = null, bool? narrow_range = null, string name = "FakeQuantWithMinMaxVarsPerChannelGradient")
+ {
+ var dict = new Dictionary();
+ dict["gradients"] = gradients;
+ dict["inputs"] = inputs;
+ dict["min"] = min;
+ dict["max"] = max;
+ if (num_bits.HasValue)
+ dict["num_bits"] = num_bits.Value;
+ if (narrow_range.HasValue)
+ dict["narrow_range"] = narrow_range.Value;
+ var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name: name, keywords: dict);
+ int _idx = 0;
+ var backprops_wrt_input = op.outputs[_idx++];
+ var backprop_wrt_min = op.outputs[_idx++];
+ var backprop_wrt_max = op.outputs[_idx++];
+ return (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max);
+ }
+
+ ///
+ /// Deprecated. Do not use.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FakeQueue'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor fake_queue (Tensor resource, string name = "FakeQueue")
+ {
+ var dict = new Dictionary();
+ dict["resource"] = resource;
+ var op = _op_def_lib._apply_op_helper("FakeQueue", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates a tensor filled with a scalar value.
+ ///
+ ///
+ /// 1-D. Represents the shape of the output tensor.
+ ///
+ ///
+ /// 0-D (scalar).
Value to fill the returned tensor. + /// + /// @compatibility(numpy) + /// Equivalent to np.full + /// @end_compatibility + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Fill'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation creates a tensor of shape dims and fills it with value. + /// + /// For example: + /// + /// + /// # Output tensor has shape [2, 3]. + /// fill([2, 3], 9) ==> [[9, 9, 9] + /// [9, 9, 9]] + /// + /// + /// tf.fill differs from tf.constant in a few ways: + /// + /// * tf.fill only supports scalar contents, whereas tf.constant supports + /// Tensor values. + /// * tf.fill creates an Op in the computation graph that constructs the actual + /// Tensor value at runtime. This is in contrast to tf.constant which embeds + /// the entire Tensor into the graph with a Const node. + /// * Because tf.fill evaluates at graph runtime, it supports dynamic shapes + /// based on other runtime Tensors, unlike tf.constant. + /// + public static Tensor fill (Tensor dims, Tensor value, string name = "Fill") + { + var dict = new Dictionary(); + dict["dims"] = dims; + dict["value"] = value; + var op = _op_def_lib._apply_op_helper("Fill", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset containing elements of first component of input_dataset having true in the last component. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FilterByLastComponentDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor filter_by_last_component_dataset (Tensor input_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "FilterByLastComponentDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("FilterByLastComponentDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that emits the records from one or more binary files. + /// + /// + /// A scalar or a vector containing the name(s) of the file(s) to be + /// read. + /// + /// + /// A scalar representing the number of bytes to skip at the + /// beginning of a file. + /// + /// + /// A scalar representing the number of bytes in each record. + /// + /// + /// A scalar representing the number of bytes to skip at the end + /// of a file. + /// + /// + /// A scalar representing the number of bytes to buffer. Must be > 0. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FixedLengthRecordDataset'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
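+ ///
+ /// A minimal sketch (hypothetical file name; 16-byte records preceded by a
+ /// 4-byte header, no footer, 1 KiB read buffer; the scalar arguments are
+ /// int64 tensors, built here with an assumed tf.constant helper):
+ ///
+ /// var dataset = gen_ops.fixed_length_record_dataset(
+ /// tf.constant(new[] { "data.bin" }), // filenames
+ /// tf.constant(4L), // header_bytes
+ /// tf.constant(16L), // record_bytes
+ /// tf.constant(0L), // footer_bytes
+ /// tf.constant(1024L)); // buffer_size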
+ /// + public static Tensor fixed_length_record_dataset (Tensor filenames, Tensor header_bytes, Tensor record_bytes, Tensor footer_bytes, Tensor buffer_size, string name = "FixedLengthRecordDataset") + { + var dict = new Dictionary(); + dict["filenames"] = filenames; + dict["header_bytes"] = header_bytes; + dict["record_bytes"] = record_bytes; + dict["footer_bytes"] = footer_bytes; + dict["buffer_size"] = buffer_size; + var op = _op_def_lib._apply_op_helper("FixedLengthRecordDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs fixed-length records from a file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FixedLengthRecordReader'. + /// + /// + /// Optional argument + /// Number of bytes in the record. + /// + /// + /// Number of bytes in the header, defaults to 0. + /// + /// + /// Number of bytes in the footer, defaults to 0. + /// + /// + /// Number of bytes to hop before each read. Default of 0 means using + /// record_bytes. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor fixed_length_record_reader (int record_bytes, int? header_bytes = null, int? footer_bytes = null, int? hop_bytes = null, string container = null, string shared_name = null, string name = "FixedLengthRecordReader") + { + var dict = new Dictionary(); + dict["record_bytes"] = record_bytes; + if (header_bytes.HasValue) + dict["header_bytes"] = header_bytes.Value; + if (footer_bytes.HasValue) + dict["footer_bytes"] = footer_bytes.Value; + if (hop_bytes.HasValue) + dict["hop_bytes"] = hop_bytes.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("FixedLengthRecordReader", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs fixed-length records from a file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FixedLengthRecordReaderV2'. + /// + /// + /// Optional argument + /// Number of bytes in the record. + /// + /// + /// Number of bytes in the header, defaults to 0. + /// + /// + /// Number of bytes in the footer, defaults to 0. + /// + /// + /// Number of bytes to hop before each read. Default of 0 means using + /// record_bytes. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The type of encoding for the file. Currently ZLIB and GZIP + /// are supported. Defaults to none. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor fixed_length_record_reader_v2 (int record_bytes, int? header_bytes = null, int? footer_bytes = null, int? 
hop_bytes = null, string container = null, string shared_name = null, string encoding = null, string name = "FixedLengthRecordReaderV2") + { + var dict = new Dictionary(); + dict["record_bytes"] = record_bytes; + if (header_bytes.HasValue) + dict["header_bytes"] = header_bytes.Value; + if (footer_bytes.HasValue) + dict["footer_bytes"] = footer_bytes.Value; + if (hop_bytes.HasValue) + dict["hop_bytes"] = hop_bytes.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (encoding != null) + dict["encoding"] = encoding; + var op = _op_def_lib._apply_op_helper("FixedLengthRecordReaderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates labels for candidate sampling with a learned unigram distribution. + /// + /// + /// A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FixedUnigramCandidateSampler'. + /// + /// + /// Optional argument + /// Number of true labels per context. + /// + /// + /// Optional argument + /// Number of candidates to randomly sample. + /// + /// + /// Optional argument + /// If unique is true, we sample with rejection, so that all sampled + /// candidates in a batch are unique. This requires some approximation to + /// estimate the post-rejection sampling probabilities. + /// + /// + /// Optional argument + /// The sampler will sample integers from the interval [0, range_max). + /// + /// + /// Each valid line in this file (which should have a CSV-like format) + /// corresponds to a valid word ID. IDs are in sequential order, starting from + /// num_reserved_ids. The last entry in each line is expected to be a value + /// corresponding to the count or relative probability. Exactly one of vocab_file + /// and unigrams needs to be passed to this op. + /// + /// + /// The distortion is used to skew the unigram probability distribution. + /// Each weight is first raised to the distortion's power before adding to the + /// internal unigram distribution. As a result, distortion = 1.0 gives regular + /// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives + /// a uniform distribution. + /// + /// + /// Optionally some reserved IDs can be added in the range [0, + /// ..., num_reserved_ids) by the users. One use case is that a special unknown + /// word token is used as ID 0. These IDs will have a sampling probability of 0. + /// + /// + /// A sampler can be used to sample from a subset of the original range + /// in order to speed up the whole computation through parallelism. This parameter + /// (together with 'shard') indicates the number of partitions that are being + /// used in the overall computation. + /// + /// + /// A sampler can be used to sample from a subset of the original range + /// in order to speed up the whole computation through parallelism. This parameter + /// (together with 'num_shards') indicates the particular partition number of a + /// sampler op, when partitioning is being used. + /// + /// + /// A list of unigram counts or probabilities, one per ID in sequential + /// order. Exactly one of vocab_file and unigrams should be passed to this op. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. 
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// sampled_candidates : A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// true_expected_count : A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// sampled_expected_count : A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// A unigram sampler could use a fixed unigram distribution read from a
+ /// file or passed in as an in-memory array instead of building up the distribution
+ /// from data on the fly. There is also an option to skew the distribution by
+ /// applying a distortion power to the weights.
+ ///
+ /// The vocabulary file should be in CSV-like format, with the last field
+ /// being the weight associated with the word.
+ ///
+ /// For each batch, this op picks a single set of sampled candidate labels.
+ ///
+ /// The advantages of sampling candidates per-batch are simplicity and the
+ /// possibility of efficient dense matrix multiplication. The disadvantage is that
+ /// the sampled candidates must be chosen independently of the context and of the
+ /// true labels.
+ ///
+ public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) fixed_unigram_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int range_max, string vocab_file = null, float? distortion = null, int? num_reserved_ids = null, int? num_shards = null, int? shard = null, float[] unigrams = null, int? seed = null, int? seed2 = null, string name = "FixedUnigramCandidateSampler")
+ {
+ var dict = new Dictionary();
+ dict["true_classes"] = true_classes;
+ dict["num_true"] = num_true;
+ dict["num_sampled"] = num_sampled;
+ dict["unique"] = unique;
+ dict["range_max"] = range_max;
+ if (vocab_file != null)
+ dict["vocab_file"] = vocab_file;
+ if (distortion.HasValue)
+ dict["distortion"] = distortion.Value;
+ if (num_reserved_ids.HasValue)
+ dict["num_reserved_ids"] = num_reserved_ids.Value;
+ if (num_shards.HasValue)
+ dict["num_shards"] = num_shards.Value;
+ if (shard.HasValue)
+ dict["shard"] = shard.Value;
+ if (unigrams != null)
+ dict["unigrams"] = unigrams;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("FixedUnigramCandidateSampler", name: name, keywords: dict);
+ int _idx = 0;
+ var sampled_candidates = op.outputs[_idx++];
+ var true_expected_count = op.outputs[_idx++];
+ var sampled_expected_count = op.outputs[_idx++];
+ return (sampled_candidates, true_expected_count, sampled_expected_count);
+ }
+
+ ///
+ /// Returns element-wise largest integer not greater than x.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Floor'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
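+ ///
+ /// For example (a sketch, assuming x is a float Tensor already in the graph):
+ ///
+ /// // floor([1.7, -0.3]) ==> [1.0, -1.0]
+ /// var y = gen_ops.floor(x);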
+ /// + public static Tensor floor (Tensor x, string name = "Floor") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Floor", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x // y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FloorDiv'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: FloorDiv supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor floor_div (Tensor x, Tensor y, string name = "FloorDiv") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("FloorDiv", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns element-wise remainder of division. When x < 0 xor y < 0 is + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FloorMod'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// true, this follows Python semantics in that the result here is consistent + /// with a flooring divide. E.g. floor(x / y) * y + mod(x, y) = x. + /// + /// *NOTE*: FloorMod supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor floor_mod (Tensor x, Tensor y, string name = "FloorMod") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("FloorMod", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs fractional average pooling on the input. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FractionalAvgPool'. + /// + /// + /// Optional argument + /// Pooling ratio for each dimension of value, currently only + /// supports row and col dimension and should be >= 1.0. For example, a valid + /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + /// must be 1.0 because we don't allow pooling on batch and channels + /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + /// respectively. + /// + /// + /// When set to True, generates the pooling sequence in a + /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + /// difference between pseudorandom and random. + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// index 0 1 2 3 4 + /// + /// value 20 5 16 3 7 + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// + /// When set to True, a fixed pooling region will be used when + /// iterating over a FractionalAvgPool node in the computation graph. Mainly used + /// in unit test to make FractionalAvgPool deterministic. 
+ ///
+ /// If either seed or seed2 are set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output : output tensor after fractional avg pooling.
+ /// row_pooling_sequence : row pooling sequence, needed to calculate gradient.
+ /// col_pooling_sequence : column pooling sequence, needed to calculate gradient.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Fractional average pooling is similar to Fractional max pooling in the pooling
+ /// region generation step. The only difference is that after pooling regions are
+ /// generated, a mean operation is performed instead of a max operation in each
+ /// pooling region.
+ ///
+ public static (Tensor output, Tensor row_pooling_sequence, Tensor col_pooling_sequence) fractional_avg_pool (Tensor value, float[] pooling_ratio, bool? pseudo_random = null, bool? overlapping = null, bool? deterministic = null, int? seed = null, int? seed2 = null, string name = "FractionalAvgPool")
+ {
+ var dict = new Dictionary();
+ dict["value"] = value;
+ dict["pooling_ratio"] = pooling_ratio;
+ if (pseudo_random.HasValue)
+ dict["pseudo_random"] = pseudo_random.Value;
+ if (overlapping.HasValue)
+ dict["overlapping"] = overlapping.Value;
+ if (deterministic.HasValue)
+ dict["deterministic"] = deterministic.Value;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("FractionalAvgPool", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var row_pooling_sequence = op.outputs[_idx++];
+ var col_pooling_sequence = op.outputs[_idx++];
+ return (output, row_pooling_sequence, col_pooling_sequence);
+ }
+
+ ///
+ /// Computes gradient of the FractionalAvgPool function.
+ ///
+ ///
+ /// Original input tensor shape for fractional_avg_pool
+ ///
+ ///
+ /// 4-D with shape [batch, height, width, channels]. Gradients
+ /// w.r.t. the output of fractional_avg_pool.
+ ///
+ ///
+ /// row pooling sequence, form pooling region with
+ /// col_pooling_sequence.
+ ///
+ ///
+ /// column pooling sequence, form pooling region with
+ /// row_pooling sequence.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FractionalAvgPoolGrad'.
+ ///
+ ///
+ /// When set to True, it means when pooling, the values at the boundary
+ /// of adjacent pooling cells are used by both cells. For example:
+ ///
+ /// index 0 1 2 3 4
+ ///
+ /// value 20 5 16 3 7
+ ///
+ /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
+ /// The result would be [41/3, 26/3] for fractional avg pooling.
+ ///
+ ///
+ /// 4-D. Gradients w.r.t. the input of fractional_avg_pool.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
+ /// FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
+ /// out_backprop to those indices that form the same pooling cell. Therefore, we
+ /// just need to know the shape of original input tensor, instead of the whole
+ /// tensor.
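+ ///
+ /// A sketch of how the pieces fit together (assuming value is a 4-D float Tensor,
+ /// out_backprop holds the incoming gradients, and an assumed tf.shape helper
+ /// recovers the original input shape):
+ ///
+ /// var (output, rows, cols) = gen_ops.fractional_avg_pool(value, new[] { 1.0f, 1.44f, 1.73f, 1.0f });
+ /// var grad = gen_ops.fractional_avg_pool_grad(tf.shape(value), out_backprop, rows, cols);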
+ ///
+ public static Tensor fractional_avg_pool_grad (Tensor orig_input_tensor_shape, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool? overlapping = null, string name = "FractionalAvgPoolGrad")
+ {
+ var dict = new Dictionary();
+ dict["orig_input_tensor_shape"] = orig_input_tensor_shape;
+ dict["out_backprop"] = out_backprop;
+ dict["row_pooling_sequence"] = row_pooling_sequence;
+ dict["col_pooling_sequence"] = col_pooling_sequence;
+ if (overlapping.HasValue)
+ dict["overlapping"] = overlapping.Value;
+ var op = _op_def_lib._apply_op_helper("FractionalAvgPoolGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Performs fractional max pooling on the input.
+ ///
+ ///
+ /// 4-D with shape [batch, height, width, channels].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FractionalMaxPool'.
+ ///
+ ///
+ /// Optional argument
+ /// Pooling ratio for each dimension of value, currently only
+ /// supports row and col dimension and should be >= 1.0. For example, a valid
+ /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+ /// must be 1.0 because we don't allow pooling on batch and channels
+ /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
+ /// respectively.
+ ///
+ ///
+ /// When set to True, generates the pooling sequence in a
+ /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
+ /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
+ /// difference between pseudorandom and random.
+ ///
+ ///
+ /// When set to True, it means when pooling, the values at the boundary
+ /// of adjacent pooling cells are used by both cells. For example:
+ ///
+ /// index 0 1 2 3 4
+ ///
+ /// value 20 5 16 3 7
+ ///
+ /// If the pooling sequence is [0, 2, 4], then 16 (at index 2) will be used twice.
+ /// The result would be [20, 16] for fractional max pooling.
+ ///
+ ///
+ /// When set to True, a fixed pooling region will be used when
+ /// iterating over a FractionalMaxPool node in the computation graph. Mainly used
+ /// in unit test to make FractionalMaxPool deterministic.
+ ///
+ ///
+ /// If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output : output tensor after fractional max pooling.
+ /// row_pooling_sequence : row pooling sequence, needed to calculate gradient.
+ /// col_pooling_sequence : column pooling sequence, needed to calculate gradient.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Fractional max pooling is slightly different from regular max pooling. In
+ /// regular max pooling, you downsize an input set by taking the maximum value of
+ /// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
+ /// a factor of N, where N is an integer. Fractional max pooling, as you might
+ /// expect from the word "fractional", means that the overall reduction ratio N
+ /// does not have to be an integer.
+ ///
+ /// The sizes of the pooling regions are generated randomly but are fairly uniform.
+ /// For example, let's look at the height dimension, and the constraints on the + /// list of rows that will be pool boundaries. + /// + /// First we define the following: + /// + /// 1. input_row_length : the number of rows from the input set + /// 2. output_row_length : which will be smaller than the input + /// 3. alpha = input_row_length / output_row_length : our reduction ratio + /// 4. K = floor(alpha) + /// 5. row_pooling_sequence : this is the result list of pool boundary rows + /// + /// Then, row_pooling_sequence should satisfy: + /// + /// 1. a[0] = 0 : the first value of the sequence is 0 + /// 2. a[end] = input_row_length : the last value of the sequence is the size + /// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + /// 4. length(row_pooling_sequence) = output_row_length+1 + /// + /// For more details on fractional max pooling, see this paper: + /// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + /// + public static (Tensor output, Tensor row_pooling_sequence, Tensor col_pooling_sequence) fractional_max_pool (Tensor value, float[] pooling_ratio, bool? pseudo_random = null, bool? overlapping = null, bool? deterministic = null, int? seed = null, int? seed2 = null, string name = "FractionalMaxPool") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["pooling_ratio"] = pooling_ratio; + if (pseudo_random.HasValue) + dict["pseudo_random"] = pseudo_random.Value; + if (overlapping.HasValue) + dict["overlapping"] = overlapping.Value; + if (deterministic.HasValue) + dict["deterministic"] = deterministic.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("FractionalMaxPool", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var row_pooling_sequence = op.outputs[_idx++]; + var col_pooling_sequence = op.outputs[_idx++]; + return (output, row_pooling_sequence, col_pooling_sequence); + } + + /// + /// Computes gradient of the FractionalMaxPool function. + /// + /// + /// Original input for fractional_max_pool + /// + /// + /// Original output for fractional_max_pool + /// + /// + /// 4-D with shape [batch, height, width, channels]. Gradients + /// w.r.t. the output of fractional_max_pool. + /// + /// + /// row pooling sequence, form pooling region with + /// col_pooling_sequence. + /// + /// + /// column pooling sequence, form pooling region with + /// row_pooling sequence. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FractionalMaxPoolGrad'. + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// index 0 1 2 3 4 + /// + /// value 20 5 16 3 7 + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [20, 16] for fractional max pooling. + /// + /// + /// 4-D. Gradients w.r.t. the input of fractional_max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor fractional_max_pool_grad (Tensor orig_input, Tensor orig_output, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool? 
overlapping = null, string name = "FractionalMaxPoolGrad") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["out_backprop"] = out_backprop; + dict["row_pooling_sequence"] = row_pooling_sequence; + dict["col_pooling_sequence"] = col_pooling_sequence; + if (overlapping.HasValue) + dict["overlapping"] = overlapping.Value; + var op = _op_def_lib._apply_op_helper("FractionalMaxPoolGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Batch normalization. + /// + /// + /// A 4D Tensor for input data. + /// + /// + /// A 1D Tensor for scaling factor, to scale the normalized x. + /// + /// + /// A 1D Tensor for offset, to shift to the normalized x. + /// + /// + /// A 1D Tensor for population mean. Used for inference only; + /// must be empty for training. + /// + /// + /// A 1D Tensor for population variance. Used for inference only; + /// must be empty for training. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedBatchNorm'. + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// y : A 4D Tensor for output data. + /// batch_mean : A 1D Tensor for the computed batch mean, to be used by TensorFlow + /// to compute the running mean. + /// batch_variance : A 1D Tensor for the computed batch variance, to be used by + /// TensorFlow to compute the running variance. + /// reserve_space_1 : A 1D Tensor for the computed batch mean, to be reused + /// in the gradient computation. + /// reserve_space_2 : A 1D Tensor for the computed batch variance (inverted variance + /// in the cuDNN case), to be reused in the gradient computation. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + public static (Tensor y, Tensor batch_mean, Tensor batch_variance, Tensor reserve_space_1, Tensor reserve_space_2) fused_batch_norm (Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float? epsilon = null, string data_format = null, bool? is_training = null, string name = "FusedBatchNorm") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["scale"] = scale; + dict["offset"] = offset; + dict["mean"] = mean; + dict["variance"] = variance; + if (epsilon.HasValue) + dict["epsilon"] = epsilon.Value; + if (data_format != null) + dict["data_format"] = data_format; + if (is_training.HasValue) + dict["is_training"] = is_training.Value; + var op = _op_def_lib._apply_op_helper("FusedBatchNorm", name: name, keywords: dict); + int _idx = 0; + var y = op.outputs[_idx++]; + var batch_mean = op.outputs[_idx++]; + var batch_variance = op.outputs[_idx++]; + var reserve_space_1 = op.outputs[_idx++]; + var reserve_space_2 = op.outputs[_idx++]; + return (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2); + } + + /// + /// Gradient for batch normalization. + /// + /// + /// A 4D Tensor for the gradient with respect to y. + /// + /// + /// A 4D Tensor for input data. + /// + /// + /// A 1D Tensor for scaling factor, to scale the normalized x. 
+ /// + /// + /// When is_training is True, a 1D Tensor for the computed batch + /// mean to be reused in gradient computation. When is_training is + /// False, a 1D Tensor for the population mean to be reused in both + /// 1st and 2nd order gradient computation. + /// + /// + /// When is_training is True, a 1D Tensor for the computed batch + /// variance (inverted variance in the cuDNN case) to be reused in + /// gradient computation. When is_training is False, a 1D Tensor + /// for the population variance to be reused in both 1st and 2nd + /// order gradient computation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedBatchNormGrad'. + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// x_backprop : A 4D Tensor for the gradient with respect to x. + /// scale_backprop : A 1D Tensor for the gradient with respect to scale. + /// offset_backprop : A 1D Tensor for the gradient with respect to offset. + /// reserve_space_3 : Unused placeholder to match the mean input in FusedBatchNorm. + /// reserve_space_4 : Unused placeholder to match the variance input + /// in FusedBatchNorm. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + public static (Tensor x_backprop, Tensor scale_backprop, Tensor offset_backprop, Tensor reserve_space_3, Tensor reserve_space_4) fused_batch_norm_grad (Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float? epsilon = null, string data_format = null, bool? is_training = null, string name = "FusedBatchNormGrad") + { + var dict = new Dictionary(); + dict["y_backprop"] = y_backprop; + dict["x"] = x; + dict["scale"] = scale; + dict["reserve_space_1"] = reserve_space_1; + dict["reserve_space_2"] = reserve_space_2; + if (epsilon.HasValue) + dict["epsilon"] = epsilon.Value; + if (data_format != null) + dict["data_format"] = data_format; + if (is_training.HasValue) + dict["is_training"] = is_training.Value; + var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: name, keywords: dict); + int _idx = 0; + var x_backprop = op.outputs[_idx++]; + var scale_backprop = op.outputs[_idx++]; + var offset_backprop = op.outputs[_idx++]; + var reserve_space_3 = op.outputs[_idx++]; + var reserve_space_4 = op.outputs[_idx++]; + return (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4); + } + + /// + /// Gradient for batch normalization. + /// + /// + /// A 4D Tensor for the gradient with respect to y. + /// + /// + /// A 4D Tensor for input data. + /// + /// + /// A 1D Tensor for scaling factor, to scale the normalized x. + /// + /// + /// When is_training is True, a 1D Tensor for the computed batch + /// mean to be reused in gradient computation. When is_training is + /// False, a 1D Tensor for the population mean to be reused in both + /// 1st and 2nd order gradient computation. 
+ /// + /// + /// When is_training is True, a 1D Tensor for the computed batch + /// variance (inverted variance in the cuDNN case) to be reused in + /// gradient computation. When is_training is False, a 1D Tensor + /// for the population variance to be reused in both 1st and 2nd + /// order gradient computation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedBatchNormGradV2'. + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// x_backprop : A 4D Tensor for the gradient with respect to x. + /// scale_backprop : A 1D Tensor for the gradient with respect to scale. + /// offset_backprop : A 1D Tensor for the gradient with respect to offset. + /// reserve_space_3 : Unused placeholder to match the mean input in FusedBatchNorm. + /// reserve_space_4 : Unused placeholder to match the variance input + /// in FusedBatchNorm. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + public static (Tensor x_backprop, Tensor scale_backprop, Tensor offset_backprop, Tensor reserve_space_3, Tensor reserve_space_4) fused_batch_norm_grad_v2 (Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float? epsilon = null, string data_format = null, bool? is_training = null, string name = "FusedBatchNormGradV2") + { + var dict = new Dictionary(); + dict["y_backprop"] = y_backprop; + dict["x"] = x; + dict["scale"] = scale; + dict["reserve_space_1"] = reserve_space_1; + dict["reserve_space_2"] = reserve_space_2; + if (epsilon.HasValue) + dict["epsilon"] = epsilon.Value; + if (data_format != null) + dict["data_format"] = data_format; + if (is_training.HasValue) + dict["is_training"] = is_training.Value; + var op = _op_def_lib._apply_op_helper("FusedBatchNormGradV2", name: name, keywords: dict); + int _idx = 0; + var x_backprop = op.outputs[_idx++]; + var scale_backprop = op.outputs[_idx++]; + var offset_backprop = op.outputs[_idx++]; + var reserve_space_3 = op.outputs[_idx++]; + var reserve_space_4 = op.outputs[_idx++]; + return (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4); + } + + /// + /// Batch normalization. + /// + /// + /// A 4D Tensor for input data. + /// + /// + /// A 1D Tensor for scaling factor, to scale the normalized x. + /// + /// + /// A 1D Tensor for offset, to shift to the normalized x. + /// + /// + /// A 1D Tensor for population mean. Used for inference only; + /// must be empty for training. + /// + /// + /// A 1D Tensor for population variance. Used for inference only; + /// must be empty for training. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedBatchNormV2'. + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. 
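+ ///
+ /// For reference (standard batch normalization, not specific to this binding):
+ /// in inference mode the op computes
+ /// \\(y = scale * (x - mean) / sqrt(variance + epsilon) + offset\\),
+ /// while in training mode the mean and variance of the current batch are used
+ /// in place of the population statistics.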
+ /// + /// + /// Returns a tuple with multiple values, as follows: + /// y : A 4D Tensor for output data. + /// batch_mean : A 1D Tensor for the computed batch mean, to be used by TensorFlow + /// to compute the running mean. + /// batch_variance : A 1D Tensor for the computed batch variance, to be used by + /// TensorFlow to compute the running variance. + /// reserve_space_1 : A 1D Tensor for the computed batch mean, to be reused + /// in the gradient computation. + /// reserve_space_2 : A 1D Tensor for the computed batch variance (inverted variance + /// in the cuDNN case), to be reused in the gradient computation. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + public static (Tensor y, Tensor batch_mean, Tensor batch_variance, Tensor reserve_space_1, Tensor reserve_space_2) fused_batch_norm_v2 (Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float? epsilon = null, string data_format = null, bool? is_training = null, string name = "FusedBatchNormV2") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["scale"] = scale; + dict["offset"] = offset; + dict["mean"] = mean; + dict["variance"] = variance; + if (epsilon.HasValue) + dict["epsilon"] = epsilon.Value; + if (data_format != null) + dict["data_format"] = data_format; + if (is_training.HasValue) + dict["is_training"] = is_training.Value; + var op = _op_def_lib._apply_op_helper("FusedBatchNormV2", name: name, keywords: dict); + int _idx = 0; + var y = op.outputs[_idx++]; + var batch_mean = op.outputs[_idx++]; + var batch_variance = op.outputs[_idx++]; + var reserve_space_1 = op.outputs[_idx++]; + var reserve_space_2 = op.outputs[_idx++]; + return (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2); + } + + /// + /// Performs a padding as a preprocess during a convolution. + /// + /// + /// 4-D with shape [batch, in_height, in_width, in_channels]. + /// + /// + /// A two-column matrix specifying the padding sizes. The number of + /// rows must be the same as the rank of input. + /// + /// + /// 4-D with shape + /// [filter_height, filter_width, in_channels, out_channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedPadConv2D'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of input. Must be in the same order as the dimension specified with format. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Similar to FusedResizeAndPadConv2d, this op allows for an optimized + /// implementation where the spatial padding transformation stage is fused with the + /// im2col lookup, but in this case without the bilinear filtering required for + /// resizing. Fusing the padding prevents the need to write out the intermediate + /// results as whole tensors, reducing memory pressure, and we can get some latency + /// gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + /// order is used instead. 
+ /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + public static Tensor fused_pad_conv2d (Tensor input, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, string name = "FusedPadConv2D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["paddings"] = paddings; + dict["filter"] = filter; + dict["mode"] = mode; + dict["strides"] = strides; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("FusedPadConv2D", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs a resize and padding as a preprocess during a convolution. + /// + /// + /// 4-D with shape [batch, in_height, in_width, in_channels]. + /// + /// + /// A 1-D int32 Tensor of 2 elements: new_height, new_width. The + /// new size for the images. + /// + /// + /// A two-column matrix specifying the padding sizes. The number of + /// rows must be the same as the rank of input. + /// + /// + /// 4-D with shape + /// [filter_height, filter_width, in_channels, out_channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'FusedResizeAndPadConv2D'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of input. Must be in the same order as the dimension specified with format. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// It's often possible to do spatial transformations more efficiently as part of + /// the packing stage of a convolution, so this op allows for an optimized + /// implementation where these stages are fused together. This prevents the need to + /// write out the intermediate results as whole tensors, reducing memory pressure, + /// and we can get some latency gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and defaults to + /// 'NHWC' order. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + public static Tensor fused_resize_and_pad_conv2d (Tensor input, Tensor size, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, bool? resize_align_corners = null, string name = "FusedResizeAndPadConv2D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["size"] = size; + dict["paddings"] = paddings; + dict["filter"] = filter; + dict["mode"] = mode; + dict["strides"] = strides; + dict["padding"] = padding; + if (resize_align_corners.HasValue) + dict["resize_align_corners"] = resize_align_corners.Value; + var op = _op_def_lib._apply_op_helper("FusedResizeAndPadConv2D", name: name, keywords: dict); + return op.output; + } + + /// + /// Gather slices from params according to indices. 
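+ ///
+ /// For example (illustrative): with params = ['a', 'b', 'c', 'd'] and
+ /// indices = [3, 0, 3], the output is ['d', 'a', 'd'].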
+ /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Gather'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// indices must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape indices.shape + params.shape[1:] where: + /// + /// + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + /// + /// + /// If indices is a permutation and len(indices) == params.shape[0] then + /// this operation will permute params accordingly. + /// + /// validate_indices: DEPRECATED. If this operation is assigned to CPU, values in + /// indices are always validated to be within range. If assigned to GPU, + /// out-of-bound indices result in safe but unspecified behavior, which may include + /// raising an error. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> + /// </div> + /// + public static Tensor gather (Tensor parameters, Tensor indices, bool? validate_indices = null, string name = "Gather") + { + var dict = new Dictionary(); + dict["params"] = parameters; + dict["indices"] = indices; + if (validate_indices.HasValue) + dict["validate_indices"] = validate_indices.Value; + var op = _op_def_lib._apply_op_helper("Gather", name: name, keywords: dict); + return op.output; + } + + /// + /// Gather slices from params into a Tensor with shape specified by indices. + /// + /// + /// The tensor from which to gather values. + /// + /// + /// Index tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GatherNd'. + /// + /// + /// Values from params gathered from indices given by indices, with + /// shape indices.shape[:-1] + params.shape[indices.shape[-1]:]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// indices is an K-dimensional integer tensor, best thought of as a + /// (K-1)-dimensional tensor of indices into params, where each element defines a + /// slice of params: + /// + /// output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + /// + /// Whereas in tf.gather indices defines slices into the first + /// dimension of params, in tf.gather_nd, indices defines slices into the + /// first N dimensions of params, where N = indices.shape[-1]. + /// + /// The last dimension of indices can be at most the rank of + /// params: + /// + /// indices.shape[-1] <= params.rank + /// + /// The last dimension of indices corresponds to elements + /// (if indices.shape[-1] == params.rank) or slices + /// (if indices.shape[-1] < params.rank) along dimension indices.shape[-1] + /// of params. The output tensor has shape + /// + /// indices.shape[:-1] + params.shape[indices.shape[-1]:] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// Some examples below. 
+ /// + /// Simple indexing into a matrix: + /// + /// + /// indices = [[0, 0], [1, 1]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = ['a', 'd'] + /// + /// + /// Slice indexing into a matrix: + /// + /// + /// indices = [[1], [0]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['c', 'd'], ['a', 'b']] + /// + /// + /// Indexing into a 3-tensor: + /// + /// + /// indices = [[1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['a1', 'b1'], ['c1', 'd1']]] + /// + /// + /// indices = [[0, 1], [1, 0]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['c0', 'd0'], ['a1', 'b1']] + /// + /// + /// indices = [[0, 0, 1], [1, 0, 1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = ['b0', 'b1'] + /// + /// + /// Batched indexing into a matrix: + /// + /// + /// indices = [[[0, 0]], [[0, 1]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['a'], ['b']] + /// + /// + /// Batched slice indexing into a matrix: + /// + /// + /// indices = [[[1]], [[0]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [[['c', 'd']], [['a', 'b']]] + /// + /// + /// Batched indexing into a 3-tensor: + /// + /// + /// indices = [[[1]], [[0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[[['a1', 'b1'], ['c1', 'd1']]], + /// [[['a0', 'b0'], ['c0', 'd0']]]] + /// + /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['c0', 'd0'], ['a1', 'b1']], + /// [['a0', 'b0'], ['c1', 'd1']]] + /// + /// + /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['b0', 'b1'], ['d0', 'c1']] + /// + /// + /// See also tf.gather and tf.batch_gather. + /// + public static Tensor gather_nd (Tensor parameters, Tensor indices, string name = "GatherNd") + { + var dict = new Dictionary(); + dict["params"] = parameters; + dict["indices"] = indices; + var op = _op_def_lib._apply_op_helper("GatherNd", name: name, keywords: dict); + return op.output; + } + + /// + /// Gather slices from params axis axis according to indices. + /// + /// + /// The tensor from which to gather values. Must be at least rank + /// axis + 1. + /// + /// + /// Index tensor. Must be in range [0, params.shape[axis]). + /// + /// + /// The axis in params to gather indices from. Defaults to the first + /// dimension. Supports negative indexes. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GatherV2'. + /// + /// + /// Values from params gathered from indices given by indices, with + /// shape params.shape[:axis] + indices.shape + params.shape[axis + 1:]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// indices must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape params.shape[:axis] + indices.shape + + /// params.shape[axis + 1:] where: + /// + /// + /// # Scalar indices (output is rank(params) - 1). + /// output[a_0, ..., a_n, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices, b_0, ..., b_n] + /// + /// # Vector indices (output is rank(params)). 
+ /// output[a_0, ..., a_n, i, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + /// + /// # Higher rank indices (output is rank(params) + rank(indices) - 1). + /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + /// + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> + /// </div> + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// See also tf.batch_gather and tf.gather_nd. + /// + public static Tensor gather_v2 (Tensor parameters, Tensor indices, Tensor axis, string name = "GatherV2") + { + var dict = new Dictionary(); + dict["params"] = parameters; + dict["indices"] = indices; + dict["axis"] = axis; + var op = _op_def_lib._apply_op_helper("GatherV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Given a path to new and old vocabulary files, returns a remapping Tensor of + /// + /// + /// Path to the new vocab file. + /// + /// + /// Path to the old vocab file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GenerateVocabRemapping'. + /// + /// + /// Optional argument + /// How many entries into the new vocab file to start reading. + /// + /// + /// Optional argument + /// Number of entries in the new vocab file to remap. + /// + /// + /// Number of entries in the old vocab file to consider. If -1, + /// use the entire old vocabulary. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// remapping : A Tensor of length num_new_vocab where the element at index i + /// is equal to the old ID that maps to the new ID i. This element is -1 for any + /// new ID that is not found in the old vocabulary. + /// num_present : Number of new vocab entries found in old vocab. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// length num_new_vocab, where remapping[i] contains the row number in the old + /// vocabulary that corresponds to row i in the new vocabulary (starting at line + /// new_vocab_offset and up to num_new_vocab entities), or -1 if entry i + /// in the new vocabulary is not in the old vocabulary. The old vocabulary is + /// constrained to the first old_vocab_size entries if old_vocab_size is not the + /// default value of -1. + /// + /// num_vocab_offset enables + /// use in the partitioned variable case, and should generally be set through + /// examining partitioning info. The format of the files should be a text file, + /// with each line containing a single entity within the vocabulary. + /// + /// For example, with new_vocab_file a text file containing each of the following + /// elements on a single line: [f0, f1, f2, f3], old_vocab_file = [f1, f0, f3], + /// num_new_vocab = 3, new_vocab_offset = 1, the returned remapping would be + /// [0, -1, 2]. 
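+ ///
+ /// To spell that example out: with new_vocab_offset = 1 and num_new_vocab = 3,
+ /// the new-vocab entries considered are f1, f2 and f3. f1 is row 0 of the old
+ /// vocabulary, f2 does not appear in it, and f3 is row 2, which gives
+ /// [0, -1, 2] (and num_present = 2).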
+ /// + /// The op also returns a count of how many entries in the new vocabulary + /// were present in the old vocabulary, which is used to calculate the number of + /// values to initialize in a weight matrix remapping + /// + /// This functionality can be used to remap both row vocabularies (typically, + /// features) and column vocabularies (typically, classes) from TensorFlow + /// checkpoints. Note that the partitioning logic relies on contiguous vocabularies + /// corresponding to div-partitioned variables. Moreover, the underlying remapping + /// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should + /// use the corresponding index_table_from_file() as the FeatureColumn framework + /// does (as opposed to tf.feature_to_id(), which uses a CuckooTable). + /// + public static (Tensor remapping, Tensor num_present) generate_vocab_remapping (Tensor new_vocab_file, Tensor old_vocab_file, int new_vocab_offset, int num_new_vocab, int? old_vocab_size = null, string name = "GenerateVocabRemapping") + { + var dict = new Dictionary(); + dict["new_vocab_file"] = new_vocab_file; + dict["old_vocab_file"] = old_vocab_file; + dict["new_vocab_offset"] = new_vocab_offset; + dict["num_new_vocab"] = num_new_vocab; + if (old_vocab_size.HasValue) + dict["old_vocab_size"] = old_vocab_size.Value; + var op = _op_def_lib._apply_op_helper("GenerateVocabRemapping", name: name, keywords: dict); + int _idx = 0; + var remapping = op.outputs[_idx++]; + var num_present = op.outputs[_idx++]; + return (remapping, num_present); + } + + /// + /// Store the input tensor in the state of the current session. + /// + /// + /// The tensor to be stored. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GetSessionHandle'. + /// + /// + /// The handle for the tensor stored in the session state, represented + /// as a string. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor get_session_handle (Tensor value, string name = "GetSessionHandle") + { + var dict = new Dictionary(); + dict["value"] = value; + var op = _op_def_lib._apply_op_helper("GetSessionHandle", name: name, keywords: dict); + return op.output; + } + + /// + /// Store the input tensor in the state of the current session. + /// + /// + /// The tensor to be stored. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GetSessionHandleV2'. + /// + /// + /// The handle for the tensor stored in the session state, represented + /// as a ResourceHandle object. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor get_session_handle_v2 (Tensor value, string name = "GetSessionHandleV2") + { + var dict = new Dictionary(); + dict["value"] = value; + var op = _op_def_lib._apply_op_helper("GetSessionHandleV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Get the value of the tensor specified by its handle. + /// + /// + /// The handle for a tensor stored in the session state. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GetSessionTensor'. + /// + /// + /// Optional argument + /// The type of the output value. + /// + /// + /// The tensor for the given handle. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor get_session_tensor (Tensor handle, TF_DataType dtype, string name = "GetSessionTensor") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("GetSessionTensor", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x > y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Greater'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Greater supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor greater (Tensor x, Tensor y, string name = "Greater") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Greater", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x >= y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GreaterEqual'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: GreaterEqual supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor greater_equal (Tensor x, Tensor y, string name = "GreaterEqual") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, keywords: dict); + return op.output; + } + + /// + /// Gives a guarantee to the TF runtime that the input tensor is a constant. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'GuaranteeConst'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The runtime is then free to make optimizations based on this. + /// + /// Only accepts value typed tensors as inputs and rejects resource variable handles + /// as input. + /// + /// Returns the input tensor without modification. + /// + public static Tensor guarantee_const (Tensor input, string name = "GuaranteeConst") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("GuaranteeConst", name: name, keywords: dict); + return op.output; + } + + /// + /// Convert one or more images from HSV to RGB. + /// + /// + /// 1-D or higher rank. HSV data to convert. Last dimension must be size 3. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HSVToRGB'. + /// + /// + /// images converted to RGB. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Outputs a tensor of the same shape as the images tensor, containing the RGB + /// value of the pixels. The output is only well defined if the value in images + /// are in [0,1]. + /// + /// See rgb_to_hsv for a description of the HSV encoding. 
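+ ///
+ /// For example, pure red is [0.0, 1.0, 1.0] in HSV and maps to [1.0, 0.0, 0.0]
+ /// in RGB.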
+ /// + public static Tensor h_s_v_to_r_g_b (Tensor images, string name = "HSVToRGB") + { + var dict = new Dictionary(); + dict["images"] = images; + var op = _op_def_lib._apply_op_helper("HSVToRGB", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a non-initialized hash table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HashTable'. + /// + /// + /// Optional argument + /// Type of the table keys. + /// + /// + /// Optional argument + /// Type of the table values. + /// + /// + /// If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// If true and shared_name is empty, the table is shared + /// using the node name. + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a hash table, specifying the type of its keys and values. + /// Before using the table you will have to initialize it. After initialization the + /// table will be immutable. + /// + public static Tensor hash_table (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, string name = "HashTable") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + var op = _op_def_lib._apply_op_helper("HashTable", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a non-initialized hash table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HashTableV2'. + /// + /// + /// Optional argument + /// Type of the table keys. + /// + /// + /// Optional argument + /// Type of the table values. + /// + /// + /// If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// If true and shared_name is empty, the table is shared + /// using the node name. + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a hash table, specifying the type of its keys and values. + /// Before using the table you will have to initialize it. After initialization the + /// table will be immutable. + /// + public static Tensor hash_table_v2 (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? 
use_node_name_sharing = null, string name = "HashTableV2") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + var op = _op_def_lib._apply_op_helper("HashTableV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Return histogram of values. + /// + /// + /// Numeric Tensor. + /// + /// + /// Shape [2] Tensor of same dtype as values. + /// values <= value_range[0] will be mapped to hist[0], + /// values >= value_range[1] will be mapped to hist[-1]. + /// + /// + /// Scalar int32 Tensor. Number of histogram bins. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HistogramFixedWidth'. + /// + /// + /// + /// + /// A 1-D Tensor holding histogram of values. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given the tensor values, this operation returns a rank 1 histogram counting + /// the number of entries in values that fall into every bin. The bins are + /// equal width and determined by the arguments value_range and nbins. + /// + /// + /// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + /// nbins = 5 + /// value_range = [0.0, 5.0] + /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + /// + /// with tf.get_default_session() as sess: + /// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + /// variables.global_variables_initializer().run() + /// sess.run(hist) => [2, 1, 1, 0, 2] + /// + /// + public static Tensor histogram_fixed_width (Tensor values, Tensor value_range, Tensor nbins, TF_DataType? dtype = null, string name = "HistogramFixedWidth") + { + var dict = new Dictionary(); + dict["values"] = values; + dict["value_range"] = value_range; + dict["nbins"] = nbins; + if (dtype.HasValue) + dict["dtype"] = dtype.Value; + var op = _op_def_lib._apply_op_helper("HistogramFixedWidth", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with a histogram. + /// + /// + /// Scalar. Tag to use for the Summary.Value. + /// + /// + /// Any shape. Values to use to build the histogram. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HistogramSummary'. + /// + /// + /// Scalar. Serialized Summary protocol buffer. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The generated + /// [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + /// has one summary value containing a histogram for values. + /// + /// This op reports an InvalidArgument error if any value is not finite. + /// + public static Tensor histogram_summary (Tensor tag, Tensor values, string name = "HistogramSummary") + { + var dict = new Dictionary(); + dict["tag"] = tag; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("HistogramSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a constant tensor on the host. Only for writing C++ tests. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'HostConst'. 
+ /// + /// + /// Optional argument + /// Attr value is the tensor to return. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor host_const (Tensor value, TF_DataType dtype, string name = "HostConst") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("HostConst", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IFFT'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most + /// dimension of input is replaced with its inverse 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifft + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 1-dimensional discrete Fourier transform over the + /// inner-most dimension of input. + /// + public static Tensor i_f_f_t (Tensor input, string name = "IFFT") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("IFFT", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse 2D fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IFFT2D'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most 2 + /// dimensions of input are replaced with their inverse 2D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifft2 + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 2-dimensional discrete Fourier transform over the + /// inner-most 2 dimensions of input. + /// + public static Tensor i_f_f_t2d (Tensor input, string name = "IFFT2D") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("IFFT2D", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse 3D fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IFFT3D'. + /// + /// + /// A complex64 tensor of the same shape as input. The inner-most 3 + /// dimensions of input are replaced with their inverse 3D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifftn with 3 dimensions. + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 3-dimensional discrete Fourier transform over the + /// inner-most 3 dimensions of input. + /// + public static Tensor i_f_f_t3d (Tensor input, string name = "IFFT3D") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("IFFT3D", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse real-valued fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// An int32 tensor of shape [1]. The FFT length. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IRFFT'. + /// + /// + /// A float32 tensor of the same rank as input. The inner-most + /// dimension of input is replaced with the fft_length samples of its inverse + /// 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.irfft + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most dimension of input. + /// + /// The inner-most dimension of input is assumed to be the result of RFFT: the + /// fft_length / 2 + 1 unique components of the DFT of a real-valued signal. If + /// fft_length is not provided, it is computed from the size of the inner-most + /// dimension of input (fft_length = 2 * (inner - 1)). If the FFT length used to + /// compute input is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along the axis IRFFT is computed on, if fft_length / 2 + 1 is smaller + /// than the corresponding dimension of input, the dimension is cropped. If it is + /// larger, the dimension is padded with zeros. + /// + public static Tensor i_r_f_f_t (Tensor input, Tensor fft_length, string name = "IRFFT") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["fft_length"] = fft_length; + var op = _op_def_lib._apply_op_helper("IRFFT", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse 2D real-valued fast Fourier transform. + /// + /// + /// A complex64 tensor. + /// + /// + /// An int32 tensor of shape [2]. The FFT length for each dimension. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IRFFT2D'. + /// + /// + /// A float32 tensor of the same rank as input. The inner-most 2 + /// dimensions of input are replaced with the fft_length samples of their + /// inverse 2D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.irfft2 + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most 2 dimensions of input. + /// + /// The inner-most 2 dimensions of input are assumed to be the result of RFFT2D: + /// The inner-most dimension contains the fft_length / 2 + 1 unique components of + /// the DFT of a real-valued signal. If fft_length is not provided, it is computed + /// from the size of the inner-most 2 dimensions of input. If the FFT length used + /// to compute input is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along each axis IRFFT2D is computed on, if fft_length (or + /// fft_length / 2 + 1 for the inner-most dimension) is smaller than the + /// corresponding dimension of input, the dimension is cropped. If it is larger, + /// the dimension is padded with zeros. + /// + public static Tensor i_r_f_f_t2d (Tensor input, Tensor fft_length, string name = "IRFFT2D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["fft_length"] = fft_length; + var op = _op_def_lib._apply_op_helper("IRFFT2D", name: name, keywords: dict); + return op.output; + } + + /// + /// Inverse 3D real-valued fast Fourier transform. 
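+ /// Together with RFFT3D this forms a round trip: applying IRFFT3D to the output
+ /// of RFFT3D, with fft_length matching the original inner dimensions, recovers
+ /// the original real signal up to numerical error.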
+ /// + /// + /// A complex64 tensor. + /// + /// + /// An int32 tensor of shape [3]. The FFT length for each dimension. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IRFFT3D'. + /// + /// + /// A float32 tensor of the same rank as input. The inner-most 3 + /// dimensions of input are replaced with the fft_length samples of their + /// inverse 3D real Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.irfftn with 3 dimensions. + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most 3 dimensions of input. + /// + /// The inner-most 3 dimensions of input are assumed to be the result of RFFT3D: + /// The inner-most dimension contains the fft_length / 2 + 1 unique components of + /// the DFT of a real-valued signal. If fft_length is not provided, it is computed + /// from the size of the inner-most 3 dimensions of input. If the FFT length used + /// to compute input is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along each axis IRFFT3D is computed on, if fft_length (or + /// fft_length / 2 + 1 for the inner-most dimension) is smaller than the + /// corresponding dimension of input, the dimension is cropped. If it is larger, + /// the dimension is padded with zeros. + /// + public static Tensor i_r_f_f_t3d (Tensor input, Tensor fft_length, string name = "IRFFT3D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["fft_length"] = fft_length; + var op = _op_def_lib._apply_op_helper("IRFFT3D", name: name, keywords: dict); + return op.output; + } + + /// + /// Return a tensor with the same shape and contents as the input tensor or value. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Identity'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor identity (Tensor input, string name = "Identity") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("Identity", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a list of tensors with the same shapes and contents as the input + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IdentityN'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// tensors. + /// + /// This op can be used to override the gradient for complicated functions. For + /// example, suppose y = f(x) and we wish to apply a custom function g for backprop + /// such that dx = g(dy). In Python, + /// + /// + /// with tf.get_default_graph().gradient_override_map( + /// {'IdentityN': 'OverrideGradientWithG'}): + /// y, _ = identity_n([f(x), x]) + /// + /// @tf.RegisterGradient('OverrideGradientWithG') + /// def ApplyG(op, dy, _): + /// return [None, g(dy)] # Do not backprop to f(x). 
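+ ///
+ /// A minimal C# sketch against this binding (illustrative; assumes a and b are
+ /// existing Tensor values in the current graph):
+ ///
+ /// var outputs = gen_ops.identity_n(new Tensor[] { a, b });
+ /// // outputs[0] and outputs[1] have the same shapes and contents as a and b.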
+ /// + /// + public static Tensor[] identity_n (Tensor[] input, string name = "IdentityN") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("IdentityN", name: name, keywords: dict); + int _idx = 0; + var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray(); + return (output); + } + + /// + /// A Reader that outputs the queued work as both the key and value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IdentityReader'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// To use, enqueue strings in a Queue. ReaderRead will take the front + /// work string and output (work, work). + /// + public static Tensor identity_reader (string container = null, string shared_name = null, string name = "IdentityReader") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("IdentityReader", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the queued work as both the key and value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IdentityReaderV2'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// To use, enqueue strings in a Queue. ReaderRead will take the front + /// work string and output (work, work). + /// + public static Tensor identity_reader_v2 (string container = null, string shared_name = null, string name = "IdentityReaderV2") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("IdentityReaderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Compute the lower regularized incomplete Gamma function P(a, x). + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Igamma'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The lower regularized incomplete Gamma function is defined as: + /// + /// + /// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) + /// + /// where + /// + /// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) + /// + /// is the lower incomplete Gamma function. + /// + /// Note, above Q(a, x) (Igammac) is the upper regularized complete + /// Gamma function. 
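+ ///
+ /// A minimal sketch (a and x are hypothetical float tensors of the same type):
+ ///
+ /// // var p = gen_ops.igamma(a, x);  // P(a, x)
+ /// // var q = gen_ops.igammac(a, x); // Q(a, x) = 1 - P(a, x)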
+ /// + public static Tensor igamma (Tensor a, Tensor x, string name = "Igamma") + { + var dict = new Dictionary(); + dict["a"] = a; + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Igamma", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of igamma(a, x) wrt a. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IgammaGradA'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor igamma_grad_a (Tensor a, Tensor x, string name = "IgammaGradA") + { + var dict = new Dictionary(); + dict["a"] = a; + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("IgammaGradA", name: name, keywords: dict); + return op.output; + } + + /// + /// Compute the upper regularized incomplete Gamma function Q(a, x). + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Igammac'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The upper regularized incomplete Gamma function is defined as: + /// + /// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) + /// + /// where + /// + /// \\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\) + /// + /// is the upper incomplete Gamma function. + /// + /// Note, above P(a, x) (Igamma) is the lower regularized complete + /// Gamma function. + /// + public static Tensor igammac (Tensor a, Tensor x, string name = "Igammac") + { + var dict = new Dictionary(); + dict["a"] = a; + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Igammac", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the imaginary part of a complex number. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Imag'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor input of complex numbers, this operation returns a tensor of + /// type float that is the imaginary part of each element in input. All + /// elements in input must be complex numbers of the form \\(a + bj\\), where *a* + /// is the real part and *b* is the imaginary part returned by this operation. + /// + /// For example: + /// + /// + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.imag(input) ==> [4.75, 5.75] + /// + /// + public static Tensor imag (Tensor input, TF_DataType? Tout = null, string name = "Imag") + { + var dict = new Dictionary(); + dict["input"] = input; + if (Tout.HasValue) + dict["Tout"] = Tout.Value; + var op = _op_def_lib._apply_op_helper("Imag", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with images. + /// + /// + /// Scalar. Used to build the tag attribute of the summary values. + /// + /// + /// 4-D of shape [batch_size, height, width, channels] where + /// channels is 1, 3, or 4. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ImageSummary'. + /// + /// + /// Max number of batch elements to generate images for. + /// + /// + /// Color to use for pixels with non-finite values. + /// + /// + /// Scalar. Serialized Summary protocol buffer. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The summary has up to max_images summary values containing images. The + /// images are built from tensor which must be 4-D with shape [batch_size, + /// height, width, channels] and where channels can be: + /// + /// * 1: tensor is interpreted as Grayscale. + /// * 3: tensor is interpreted as RGB. + /// * 4: tensor is interpreted as RGBA. + /// + /// The images have the same number of channels as the input tensor. For float + /// input, the values are normalized one image at a time to fit in the range + /// [0, 255]. uint8 values are unchanged. The op uses two different + /// normalization algorithms: + /// + /// * If the input values are all positive, they are rescaled so the largest one + /// is 255. + /// + /// * If any input value is negative, the values are shifted so input value 0.0 + /// is at 127. They are then rescaled so that either the smallest value is 0, + /// or the largest one is 255. + /// + /// The tag argument is a scalar Tensor of type string. It is used to + /// build the tag of the summary values: + /// + /// * If max_images is 1, the summary value tag is '*tag*/image'. + /// * If max_images is greater than 1, the summary value tags are + /// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. + /// + /// The bad_color argument is the color to use in the generated images for + /// non-finite input values. It is a uint8 1-D tensor of length channels. + /// Each element must be in the range [0, 255] (It represents the value of a + /// pixel in the output image). Non-finite values in the input tensor are + /// replaced by this tensor in the output image. The default value is the color + /// red. + /// + public static Tensor image_summary (Tensor tag, Tensor tensor, int? max_images = null, Tensor bad_color = null, string name = "ImageSummary") + { + var dict = new Dictionary(); + dict["tag"] = tag; + dict["tensor"] = tensor; + if (max_images.HasValue) + dict["max_images"] = max_images.Value; + if (bad_color != null) + dict["bad_color"] = bad_color; + var op = _op_def_lib._apply_op_helper("ImageSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns immutable tensor from memory region. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ImmutableConst'. + /// + /// + /// Optional argument + /// Type of the returned tensor. + /// + /// + /// Optional argument + /// Shape of the returned tensor. + /// + /// + /// Optional argument + /// Name of readonly memory region used by the tensor, see + /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The current implementation memmaps the tensor from a file. + /// + public static Tensor immutable_const (TF_DataType dtype, TensorShape shape, string memory_region_name, string name = "ImmutableConst") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + dict["memory_region_name"] = memory_region_name; + var op = _op_def_lib._apply_op_helper("ImmutableConst", name: name, keywords: dict); + return op.output; + } + + /// + /// Says whether the targets are in the top K predictions. + /// + /// + /// A batch_size x classes tensor. + /// + /// + /// A batch_size vector of class ids. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InTopK'. + /// + /// + /// Optional argument + /// Number of top elements to look at for computing precision. + /// + /// + /// Computed Precision at k as a bool Tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This outputs a batch_size bool array, an entry out[i] is true if the + /// prediction for the target class is among the top k predictions among + /// all predictions for example i. Note that the behavior of InTopK differs + /// from the TopK op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-k boundary, all of those + /// classes are considered to be in the top k. + /// + /// More formally, let + /// + /// \\(predictions_i\\) be the predictions for all classes for example i, + /// \\(targets_i\\) be the target class for example i, + /// \\(out_i\\) be the output for example i, + /// + /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + /// + public static Tensor in_top_k (Tensor predictions, Tensor targets, int k, string name = "InTopK") + { + var dict = new Dictionary(); + dict["predictions"] = predictions; + dict["targets"] = targets; + dict["k"] = k; + var op = _op_def_lib._apply_op_helper("InTopK", name: name, keywords: dict); + return op.output; + } + + /// + /// Says whether the targets are in the top K predictions. + /// + /// + /// A batch_size x classes tensor. + /// + /// + /// A batch_size vector of class ids. + /// + /// + /// Number of top elements to look at for computing precision. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InTopKV2'. + /// + /// + /// Computed precision at k as a bool Tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This outputs a batch_size bool array, an entry out[i] is true if the + /// prediction for the target class is among the top k predictions among + /// all predictions for example i. Note that the behavior of InTopK differs + /// from the TopK op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-k boundary, all of those + /// classes are considered to be in the top k. + /// + /// More formally, let + /// + /// \\(predictions_i\\) be the predictions for all classes for example i, + /// \\(targets_i\\) be the target class for example i, + /// \\(out_i\\) be the output for example i, + /// + /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + /// + public static Tensor in_top_k_v2 (Tensor predictions, Tensor targets, Tensor k, string name = "InTopKV2") + { + var dict = new Dictionary(); + dict["predictions"] = predictions; + dict["targets"] = targets; + dict["k"] = k; + var op = _op_def_lib._apply_op_helper("InTopKV2", name: name, keywords: dict); + return op.output; + } + + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InfeedDequeue'. + /// + /// + /// Optional argument + /// The type of elements in the tensor. + /// + /// + /// Optional argument + /// The shape of the tensor. + /// + /// + /// A tensor that will be provided using the infeed mechanism. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor infeed_dequeue (TF_DataType dtype, TensorShape shape, string name = "InfeedDequeue") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("InfeedDequeue", name: name, keywords: dict); + return op.output; + } + + /// + /// A placeholder op for multiple values that will be fed into the computation + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InfeedDequeueTuple'. + /// + /// + /// Optional argument + /// The element types of each element in outputs. + /// + /// + /// Optional argument + /// The shapes of each tensor in outputs. + /// + /// + /// A list of tensors that will be provided using the infeed mechanism. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// simultaneously as an XLA tuple. + /// + public static Tensor[] infeed_dequeue_tuple (TF_DataType[] dtypes, TensorShape[] shapes, string name = "InfeedDequeueTuple") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + dict["shapes"] = shapes; + var op = _op_def_lib._apply_op_helper("InfeedDequeueTuple", name: name, keywords: dict); + int _idx = 0; + var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray(); + return (outputs); + } + + /// + /// An op which feeds a single Tensor value into the computation. + /// + /// + /// A tensor that will be provided using the infeed mechanism. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InfeedEnqueue'. + /// + /// + /// The shape of the tensor. + /// + /// + /// The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// + /// Returns the description of the operation + /// + public static Operation infeed_enqueue (Tensor input, TensorShape shape = null, int? device_ordinal = null, string name = "InfeedEnqueue") + { + var dict = new Dictionary(); + dict["input"] = input; + if (shape != null) + dict["shape"] = shape; + if (device_ordinal.HasValue) + dict["device_ordinal"] = device_ordinal.Value; + var op = _op_def_lib._apply_op_helper("InfeedEnqueue", name: name, keywords: dict); + return op; + } + + /// + /// An op which feeds multiple Tensor values into the computation as an XLA tuple. + /// + /// + /// A list of tensors that will be provided using the infeed mechanism. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InfeedEnqueueTuple'. + /// + /// + /// Optional argument + /// The shapes of each tensor in inputs. + /// + /// + /// The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// + /// Returns the description of the operation + /// + public static Operation infeed_enqueue_tuple (Tensor[] inputs, TensorShape[] shapes, int? 
device_ordinal = null, string name = "InfeedEnqueueTuple") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + dict["shapes"] = shapes; + if (device_ordinal.HasValue) + dict["device_ordinal"] = device_ordinal.Value; + var op = _op_def_lib._apply_op_helper("InfeedEnqueueTuple", name: name, keywords: dict); + return op; + } + + /// + /// Table initializer that takes two tensors for keys and values respectively. + /// + /// + /// Handle to a table which will be initialized. + /// + /// + /// Keys of type Tkey. + /// + /// + /// Values of type Tval. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InitializeTable'. + /// + /// + /// Returns the description of the operation + /// + public static Operation initialize_table (Tensor table_handle, Tensor keys, Tensor values, string name = "InitializeTable") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("InitializeTable", name: name, keywords: dict); + return op; + } + + /// + /// Initializes a table from a text file. + /// + /// + /// Handle to a table which will be initialized. + /// + /// + /// Filename of a vocabulary text file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InitializeTableFromTextFile'. + /// + /// + /// Optional argument + /// Column index in a line to get the table key values from. + /// + /// + /// Optional argument + /// Column index that represents information of a line to get the table + /// value values from. + /// + /// + /// Number of elements of the file, use -1 if unknown. + /// + /// + /// Delimiter to separate fields in a line. + /// + /// + /// Returns the description of the operation + /// + /// + /// It inserts one key-value pair into the table for each line of the file. + /// The key and value are extracted from the whole line content, elements from the + /// split line based on delimiter or the line number (starting from zero). + /// Where to extract the key and value from a line is specified by key_index and + /// value_index. + /// + /// - A value of -1 means use the line number (starting from zero), expects int64. + /// - A value of -2 means use the whole line content, expects string. + /// - A value >= 0 means use the index (starting at zero) of the split line based + /// on delimiter. + /// + public static Operation initialize_table_from_text_file (Tensor table_handle, Tensor filename, int key_index, int value_index, int? vocab_size = null, string delimiter = null, string name = "InitializeTableFromTextFile") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["filename"] = filename; + dict["key_index"] = key_index; + dict["value_index"] = value_index; + if (vocab_size.HasValue) + dict["vocab_size"] = vocab_size.Value; + if (delimiter != null) + dict["delimiter"] = delimiter; + var op = _op_def_lib._apply_op_helper("InitializeTableFromTextFile", name: name, keywords: dict); + return op; + } + + /// + /// Initializes a table from a text file. + /// + /// + /// Handle to a table which will be initialized. + /// + /// + /// Filename of a vocabulary text file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InitializeTableFromTextFileV2'. + /// + /// + /// Optional argument + /// Column index in a line to get the table key values from. 
+ /// + /// + /// Optional argument + /// Column index that represents information of a line to get the table + /// value values from. + /// + /// + /// Number of elements of the file, use -1 if unknown. + /// + /// + /// Delimiter to separate fields in a line. + /// + /// + /// Returns the description of the operation + /// + /// + /// It inserts one key-value pair into the table for each line of the file. + /// The key and value are extracted from the whole line content, elements from the + /// split line based on delimiter or the line number (starting from zero). + /// Where to extract the key and value from a line is specified by key_index and + /// value_index. + /// + /// - A value of -1 means use the line number (starting from zero), expects int64. + /// - A value of -2 means use the whole line content, expects string. + /// - A value >= 0 means use the index (starting at zero) of the split line based + /// on delimiter. + /// + public static Operation initialize_table_from_text_file_v2 (Tensor table_handle, Tensor filename, int key_index, int value_index, int? vocab_size = null, string delimiter = null, string name = "InitializeTableFromTextFileV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["filename"] = filename; + dict["key_index"] = key_index; + dict["value_index"] = value_index; + if (vocab_size.HasValue) + dict["vocab_size"] = vocab_size.Value; + if (delimiter != null) + dict["delimiter"] = delimiter; + var op = _op_def_lib._apply_op_helper("InitializeTableFromTextFileV2", name: name, keywords: dict); + return op; + } + + /// + /// Table initializer that takes two tensors for keys and values respectively. + /// + /// + /// Handle to a table which will be initialized. + /// + /// + /// Keys of type Tkey. + /// + /// + /// Values of type Tval. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InitializeTableV2'. + /// + /// + /// Returns the description of the operation + /// + public static Operation initialize_table_v2 (Tensor table_handle, Tensor keys, Tensor values, string name = "InitializeTableV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("InitializeTableV2", name: name, keywords: dict); + return op; + } + + /// + /// Adds v into specified rows of x. + /// + /// Computes y = x; y[i, :] += v; return y. + /// + /// + /// A Tensor of type T. + /// + /// + /// A vector. Indices into the left-most dimension of x. + /// + /// + /// A Tensor of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InplaceAdd'. + /// + /// + /// A Tensor of type T. An alias of x. The content of y is undefined if there are duplicates in i. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor inplace_add (Tensor x, Tensor i, Tensor v, string name = "InplaceAdd") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["i"] = i; + dict["v"] = v; + var op = _op_def_lib._apply_op_helper("InplaceAdd", name: name, keywords: dict); + return op.output; + } + + /// + /// Subtracts v from specified rows of x. + /// + /// Computes y = x; y[i, :] -= v; return y. + /// + /// + /// A Tensor of type T. + /// + /// + /// A vector. 
Indices into the left-most dimension of x. + /// + /// + /// A Tensor of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InplaceSub'. + /// + /// + /// A Tensor of type T. An alias of x. The content of y is undefined if there are duplicates in i. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor inplace_sub (Tensor x, Tensor i, Tensor v, string name = "InplaceSub") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["i"] = i; + dict["v"] = v; + var op = _op_def_lib._apply_op_helper("InplaceSub", name: name, keywords: dict); + return op.output; + } + + /// + /// Updates specified rows with values in v. + /// + /// Computes x[i, :] = v; return x. + /// + /// + /// A tensor of type T. + /// + /// + /// A vector. Indices into the left-most dimension of x. + /// + /// + /// A Tensor of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InplaceUpdate'. + /// + /// + /// A Tensor of type T. An alias of x. The content of y is undefined if there are duplicates in i. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor inplace_update (Tensor x, Tensor i, Tensor v, string name = "InplaceUpdate") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["i"] = i; + dict["v"] = v; + var op = _op_def_lib._apply_op_helper("InplaceUpdate", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Inv'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = 1 / x\\). + /// + public static Tensor inv (Tensor x, string name = "Inv") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Inv", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient for the inverse of x wrt its input. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InvGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = -dy * y*y, where y = 1/x, and dy + /// is the corresponding input gradient. + /// + public static Tensor inv_grad (Tensor y, Tensor dy, string name = "InvGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("InvGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Flips all bits elementwise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Invert'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The result will have exactly those bits set that are not set in x. 
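+ /// For example, for an int8 tensor (a sketch with assumed values):
+ ///
+ /// invert([0, 1, -1]) ==> [-1, -2, 0]
+ ///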
+ /// The computation is performed on the underlying representation of x. + /// + public static Tensor invert (Tensor x, string name = "Invert") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Invert", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the inverse permutation of a tensor. + /// + /// + /// 1-D. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'InvertPermutation'. + /// + /// + /// 1-D. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes the inverse of an index permutation. It takes a 1-D + /// integer tensor x, which represents the indices of a zero-based array, and + /// swaps each value with its index position. In other words, for an output tensor + /// y and an input tensor x, this operation computes the following: + /// + /// y[x[i]] = i for i in [0, 1, ..., len(x) - 1] + /// + /// The values must include 0. There can be no duplicate values or negative values. + /// + /// For example: + /// + /// + /// # tensor x is [3, 4, 0, 2, 1] + /// invert_permutation(x) ==> [2, 4, 3, 0, 1] + /// + /// + public static Tensor invert_permutation (Tensor x, string name = "InvertPermutation") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("InvertPermutation", name: name, keywords: dict); + return op.output; + } + + /// + /// Checks whether a tree ensemble has been initialized. + /// + /// + /// Handle to the tree ensemble resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IsBoostedTreesEnsembleInitialized'. + /// + /// + /// output boolean on whether it is initialized or not. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor is_boosted_trees_ensemble_initialized (Tensor tree_ensemble_handle, string name = "IsBoostedTreesEnsembleInitialized") + { + var dict = new Dictionary(); + dict["tree_ensemble_handle"] = tree_ensemble_handle; + var op = _op_def_lib._apply_op_helper("IsBoostedTreesEnsembleInitialized", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns which elements of x are finite. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IsFinite'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isfinite + /// @end_compatibility + /// + public static Tensor is_finite (Tensor x, string name = "IsFinite") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("IsFinite", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns which elements of x are Inf. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IsInf'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
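+ ///
+ /// For example (a sketch with assumed values):
+ ///
+ /// is_inf([5.0, -Infinity, Infinity, NaN]) ==> [false, true, true, false]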
+ /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isinf + /// @end_compatibility + /// + public static Tensor is_inf (Tensor x, string name = "IsInf") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("IsInf", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns which elements of x are NaN. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IsNan'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isnan + /// @end_compatibility + /// + public static Tensor is_nan (Tensor x, string name = "IsNan") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("IsNan", name: name, keywords: dict); + return op.output; + } + + /// + /// Checks whether a tensor has been initialized. + /// + /// + /// Should be from a Variable node. May be uninitialized. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IsVariableInitialized'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Outputs boolean scalar indicating whether the tensor has been initialized. + /// + public static Tensor is_variable_initialized (Tensor reference, string name = "IsVariableInitialized") + { + var dict = new Dictionary(); + dict["ref"] = reference; + var op = _op_def_lib._apply_op_helper("IsVariableInitialized", name: name, keywords: dict); + return op.output; + } + + /// + /// A container for an iterator resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Iterator'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// A handle to the iterator that can be passed to a "MakeIterator" + /// or "IteratorGetNext" op. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor iterator (string shared_name, string container, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "Iterator") + { + var dict = new Dictionary(); + dict["shared_name"] = shared_name; + dict["container"] = container; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("Iterator", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts the given string representing a handle to an iterator to a resource. + /// + /// + /// A string representation of the given handle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IteratorFromStringHandle'. + /// + /// + /// If specified, defines the type of each tuple component in an + /// element produced by the resulting iterator. + /// + /// + /// If specified, defines the shape of each tuple component in an + /// element produced by the resulting iterator. + /// + /// + /// A handle to an iterator resource. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
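+ ///
+ /// A minimal round-trip sketch (iter, output_types and output_shapes are
+ /// hypothetical, pre-existing values):
+ ///
+ /// // var s = gen_ops.iterator_to_string_handle(iter);
+ /// // var restored = gen_ops.iterator_from_string_handle(s, output_types, output_shapes);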
+ /// + public static Tensor iterator_from_string_handle (Tensor string_handle, TF_DataType[] output_types = null, TensorShape[] output_shapes = null, string name = "IteratorFromStringHandle") + { + var dict = new Dictionary(); + dict["string_handle"] = string_handle; + if (output_types != null) + dict["output_types"] = output_types; + if (output_shapes != null) + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("IteratorFromStringHandle", name: name, keywords: dict); + return op.output; + } + + /// + /// Gets the next output from the given iterator. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IteratorGetNext'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] iterator_get_next (Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNext") + { + var dict = new Dictionary(); + dict["iterator"] = iterator; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("IteratorGetNext", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Gets the next output from the given iterator as an Optional variant. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IteratorGetNextAsOptional'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor iterator_get_next_as_optional (Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNextAsOptional") + { + var dict = new Dictionary(); + dict["iterator"] = iterator; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("IteratorGetNextAsOptional", name: name, keywords: dict); + return op.output; + } + + /// + /// Gets the next output from the given iterator. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IteratorGetNextSync'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation is a synchronous version of IteratorGetNext. It should only be used + /// in situations where the iterator does not block the calling thread, or where + /// the calling thread is not a member of the thread pool used to execute parallel + /// operations (e.g. in eager mode). 
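+ ///
+ /// A minimal sketch (iterator, output_types and output_shapes are
+ /// hypothetical, pre-existing values):
+ ///
+ /// // Tensor[] components = gen_ops.iterator_get_next_sync(iterator, output_types, output_shapes);
+ /// // var first = components[0];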
+ /// + public static Tensor[] iterator_get_next_sync (Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNextSync") + { + var dict = new Dictionary(); + dict["iterator"] = iterator; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("IteratorGetNextSync", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Converts the given resource_handle representing an iterator to a string. + /// + /// + /// A handle to an iterator resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'IteratorToStringHandle'. + /// + /// + /// A string representation of the given handle. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor iterator_to_string_handle (Tensor resource_handle, string name = "IteratorToStringHandle") + { + var dict = new Dictionary(); + dict["resource_handle"] = resource_handle; + var op = _op_def_lib._apply_op_helper("IteratorToStringHandle", name: name, keywords: dict); + return op.output; + } + + /// + /// L2 Loss. + /// + /// + /// Typically 2-D, but may have any dimensions. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'L2Loss'. + /// + /// + /// 0-D. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes half the L2 norm of a tensor without the sqrt: + /// + /// output = sum(t ** 2) / 2 + /// + public static Tensor l2loss (Tensor t, string name = "L2Loss") + { + var dict = new Dictionary(); + dict["t"] = t; + var op = _op_def_lib._apply_op_helper("L2Loss", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the records from a LMDB file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LMDBReader'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor l_m_d_b_reader (string container = null, string shared_name = null, string name = "LMDBReader") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("LMDBReader", name: name, keywords: dict); + return op.output; + } + + /// + /// Local Response Normalization. + /// + /// + /// 4-D. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LRN'. + /// + /// + /// 0-D. Half-width of the 1-D normalization window. + /// + /// + /// An offset (usually positive to avoid dividing by 0). + /// + /// + /// A scale factor, usually positive. + /// + /// + /// An exponent. 
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last + /// dimension), and each vector is normalized independently. Within a given vector, + /// each component is divided by the weighted, squared sum of inputs within + /// depth_radius. In detail, + /// + /// sqr_sum[a, b, c, d] = + /// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + /// output = input / (bias + alpha * sqr_sum) ** beta + /// + /// For details, see [Krizhevsky et al., ImageNet classification with deep + /// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + /// + public static Tensor l_r_n (Tensor input, int? depth_radius = null, float? bias = null, float? alpha = null, float? beta = null, string name = "LRN") + { + var dict = new Dictionary(); + dict["input"] = input; + if (depth_radius.HasValue) + dict["depth_radius"] = depth_radius.Value; + if (bias.HasValue) + dict["bias"] = bias.Value; + if (alpha.HasValue) + dict["alpha"] = alpha.Value; + if (beta.HasValue) + dict["beta"] = beta.Value; + var op = _op_def_lib._apply_op_helper("LRN", name: name, keywords: dict); + return op.output; + } + + /// + /// Gradients for Local Response Normalization. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LRNGrad'. + /// + /// + /// A depth radius. + /// + /// + /// An offset (usually > 0 to avoid dividing by 0). + /// + /// + /// A scale factor, usually positive. + /// + /// + /// An exponent. + /// + /// + /// The gradients for LRN. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor l_r_n_grad (Tensor input_grads, Tensor input_image, Tensor output_image, int? depth_radius = null, float? bias = null, float? alpha = null, float? beta = null, string name = "LRNGrad") + { + var dict = new Dictionary(); + dict["input_grads"] = input_grads; + dict["input_image"] = input_image; + dict["output_image"] = output_image; + if (depth_radius.HasValue) + dict["depth_radius"] = depth_radius.Value; + if (bias.HasValue) + dict["bias"] = bias.Value; + if (alpha.HasValue) + dict["alpha"] = alpha.Value; + if (beta.HasValue) + dict["beta"] = beta.Value; + var op = _op_def_lib._apply_op_helper("LRNGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Records the latency of producing input_dataset elements in a StatsAggregator. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LatencyStatsDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// + public static Tensor latency_stats_dataset (Tensor input_dataset, Tensor tag, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "LatencyStatsDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["tag"] = tag; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("LatencyStatsDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates labels for candidate sampling with a learned unigram distribution. + /// + /// + /// A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LearnedUnigramCandidateSampler'. + /// + /// + /// Optional argument + /// Number of true labels per context. + /// + /// + /// Optional argument + /// Number of candidates to randomly sample. + /// + /// + /// Optional argument + /// If unique is true, we sample with rejection, so that all sampled + /// candidates in a batch are unique. This requires some approximation to + /// estimate the post-rejection sampling probabilities. + /// + /// + /// Optional argument + /// The sampler will sample integers from the interval [0, range_max). + /// + /// + /// If either seed or seed2 is set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sampled_candidates : A vector of length num_sampled, in which each element is + /// the ID of a sampled candidate. + /// true_expected_count : A batch_size * num_true matrix, representing + /// the number of times each candidate is expected to occur in a batch + /// of sampled candidates. If unique=true, then this is a probability. + /// sampled_expected_count : A vector of length num_sampled, for each sampled + /// candidate representing the number of times the candidate is expected + /// to occur in a batch of sampled candidates. If unique=true, then this is a + /// probability. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// See explanations of candidate sampling and the data formats at + /// go/candidate-sampling. + /// + /// For each batch, this op picks a single set of sampled candidate labels. + /// + /// The advantages of sampling candidates per-batch are simplicity and the + /// possibility of efficient dense matrix multiplication. The disadvantage is that + /// the sampled candidates must be chosen independently of the context and of the + /// true labels. + /// + public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) learned_unigram_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int range_max, int? seed = null, int? 
seed2 = null, string name = "LearnedUnigramCandidateSampler") + { + var dict = new Dictionary(); + dict["true_classes"] = true_classes; + dict["num_true"] = num_true; + dict["num_sampled"] = num_sampled; + dict["unique"] = unique; + dict["range_max"] = range_max; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("LearnedUnigramCandidateSampler", name: name, keywords: dict); + int _idx = 0; + var sampled_candidates = op.outputs[_idx++]; + var true_expected_count = op.outputs[_idx++]; + var sampled_expected_count = op.outputs[_idx++]; + return (sampled_candidates, true_expected_count, sampled_expected_count); + } + + /// + /// Elementwise computes the bitwise left-shift of x and y. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LeftShift'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If y is negative, or greater than or equal to the width of x in bits the + /// result is implementation defined. + /// + public static Tensor left_shift (Tensor x, Tensor y, string name = "LeftShift") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("LeftShift", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x < y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Less'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Less supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor less (Tensor x, Tensor y, string name = "Less") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Less", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x <= y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LessEqual'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: LessEqual supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor less_equal (Tensor x, Tensor y, string name = "LessEqual") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("LessEqual", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the log of the absolute value of Gamma(x) element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Lgamma'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
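+ ///
+ /// For example, since Gamma(n) = (n - 1)! (a sketch with assumed values):
+ ///
+ /// lgamma([1.0, 2.0, 4.0]) ==> [0.0, 0.0, 1.7917595]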
+ /// + public static Tensor lgamma (Tensor x, string name = "Lgamma") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Lgamma", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates values in an interval. + /// + /// + /// 0-D tensor. First entry in the range. + /// + /// + /// 0-D tensor. Last entry in the range. + /// + /// + /// 0-D tensor. Number of values to generate. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LinSpace'. + /// + /// + /// 1-D. The generated values. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// A sequence of num evenly-spaced values are generated beginning at start. + /// If num > 1, the values in the sequence increase by (stop - start) / (num - 1), + /// so that the last one is exactly stop. + /// + /// For example: + /// + /// + /// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] + /// + /// + public static Tensor lin_space (Tensor start, Tensor stop, Tensor num, string name = "LinSpace") + { + var dict = new Dictionary(); + dict["start"] = start; + dict["stop"] = stop; + dict["num"] = num; + var op = _op_def_lib._apply_op_helper("LinSpace", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the difference between two lists of numbers or strings. + /// + /// + /// 1-D. Values to keep. + /// + /// + /// 1-D. Values to remove. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ListDiff'. + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : 1-D. Values present in x but not in y. + /// idx : 1-D. Positions of x values preserved in out. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Given a list x and a list y, this operation returns a list out that + /// represents all values that are in x but not in y. The returned list out + /// is sorted in the same order that the numbers appear in x (duplicates are + /// preserved). This operation also returns a list idx that represents the + /// position of each out element in x. In other words: + /// + /// out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1] + /// + /// For example, given this input: + /// + /// + /// x = [1, 2, 3, 4, 5, 6] + /// y = [1, 3, 5] + /// + /// + /// This operation would return: + /// + /// + /// out ==> [2, 4, 6] + /// idx ==> [1, 3, 5] + /// + /// + public static (Tensor output, Tensor idx) list_diff (Tensor x, Tensor y, TF_DataType? out_idx = null, string name = "ListDiff") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + if (out_idx.HasValue) + dict["out_idx"] = out_idx.Value; + var op = _op_def_lib._apply_op_helper("ListDiff", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var idx = op.outputs[_idx++]; + return (output, idx); + } + + /// + /// Loads a 2-D (matrix) Tensor with name old_tensor_name from the checkpoint + /// + /// + /// Path to the TensorFlow checkpoint (version 2, TensorBundle) from + /// which the old matrix Tensor will be loaded. + /// + /// + /// Name of the 2-D Tensor to load from checkpoint. + /// + /// + /// An int Tensor of row remappings (generally created by + /// generate_vocab_remapping). 
Even if no row remapping is needed, this must + /// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted + /// index-valued Tensor (e.g. [8, 9, 10, ...], for partitioned Variables). + /// + /// + /// An int Tensor of column remappings (generally created by + /// generate_vocab_remapping). May be a size-0 Tensor if only row remapping + /// is to be done (e.g. column ordering is the same). + /// + /// + /// A float Tensor containing values to fill in for cells + /// in the output matrix that are not loaded from the checkpoint. Length must be + /// exactly the same as the number of missing / new cells. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LoadAndRemapMatrix'. + /// + /// + /// Optional argument + /// Number of rows (length of the 1st dimension) in the output matrix. + /// + /// + /// Optional argument + /// Number of columns (length of the 2nd dimension) in the output matrix. + /// + /// + /// The maximum number of rows to load from the checkpoint at + /// once. If less than or equal to 0, the entire matrix will be loaded into + /// memory. Setting this arg trades increased disk reads for lower memory usage. + /// + /// + /// Output matrix containing existing values loaded from the + /// checkpoint, and with any missing values filled in from initializing_values. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// at ckpt_path and potentially reorders its rows and columns using the + /// specified remappings. + /// + /// Most users should use one of the wrapper initializers (such as + /// tf.contrib.framework.load_and_remap_matrix_initializer) instead of this + /// function directly. + /// + /// The remappings are 1-D tensors with the following properties: + /// + /// * row_remapping must have exactly num_rows entries. Row i of the output + /// matrix will be initialized from the row corresponding to index + /// row_remapping[i] in the old Tensor from the checkpoint. + /// * col_remapping must have either 0 entries (indicating that no column + /// reordering is needed) or num_cols entries. If specified, column j of the + /// output matrix will be initialized from the column corresponding to index + /// col_remapping[j] in the old Tensor from the checkpoint. + /// * A value of -1 in either of the remappings signifies a "missing" entry. In that + /// case, values from the initializing_values tensor will be used to fill that + /// missing row or column. If row_remapping has r missing entries and + /// col_remapping has c missing entries, then the following condition must be + /// true: + /// + /// (r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values) + /// + /// The remapping tensors can be generated using the GenerateVocabRemapping op. + /// + /// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], + /// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing + /// the value from row i, column j of the old tensor in the checkpoint, the output + /// matrix will look like the following: + /// + /// [[w(1, 0), w(1, 2), 0.5], + /// [w(0, 0), w(0, 2), -0.5], + /// [0.25, -0.25, 42]] + /// + public static Tensor load_and_remap_matrix (Tensor ckpt_path, Tensor old_tensor_name, Tensor row_remapping, Tensor col_remapping, Tensor initializing_values, int num_rows, int num_cols, int? 
max_rows_in_memory = null, string name = "LoadAndRemapMatrix") + { + var dict = new Dictionary(); + dict["ckpt_path"] = ckpt_path; + dict["old_tensor_name"] = old_tensor_name; + dict["row_remapping"] = row_remapping; + dict["col_remapping"] = col_remapping; + dict["initializing_values"] = initializing_values; + dict["num_rows"] = num_rows; + dict["num_cols"] = num_cols; + if (max_rows_in_memory.HasValue) + dict["max_rows_in_memory"] = max_rows_in_memory.Value; + var op = _op_def_lib._apply_op_helper("LoadAndRemapMatrix", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes natural logarithm of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Log'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = \log_e x\\). + /// + public static Tensor log (Tensor x, string name = "Log") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Log", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes natural logarithm of (1 + x) element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Log1p'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = \log_e (1 + x)\\). + /// + public static Tensor log1p (Tensor x, string name = "Log1p") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Log1p", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the sign and the log of the absolute value of the determinant of + /// + /// + /// Shape is [N, M, M]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogMatrixDeterminant'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sign : The signs of the log determinants of the inputs. Shape is [N]. + /// log_abs_determinant : The logs of the absolute values of the determinants + /// of the N input matrices. Shape is [N]. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// one or more square matrices. + /// + /// The input is a tensor of shape [N, M, M] whose inner-most 2 dimensions + /// form square matrices. The outputs are two tensors containing the signs and + /// absolute values of the log determinants for all N input submatrices + /// [..., :, :] such that the determinant = sign*exp(log_abs_determinant). + /// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU + /// is the LU decomposition of the input and P is the corresponding + /// permutation matrix. + /// + public static (Tensor sign, Tensor log_abs_determinant) log_matrix_determinant (Tensor input, string name = "LogMatrixDeterminant") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("LogMatrixDeterminant", name: name, keywords: dict); + int _idx = 0; + var sign = op.outputs[_idx++]; + var log_abs_determinant = op.outputs[_idx++]; + return (sign, log_abs_determinant); + } + + /// + /// Computes log softmax activations. + /// + /// + /// 2-D with shape [batch_size, num_classes].
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogSoftmax'. + /// + /// + /// Same shape as logits. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// For each batch i and class j we have + /// + /// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + /// + public static Tensor log_softmax (Tensor logits, string name = "LogSoftmax") + { + var dict = new Dictionary(); + dict["logits"] = logits; + var op = _op_def_lib._apply_op_helper("LogSoftmax", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates labels for candidate sampling with a log-uniform distribution. + /// + /// + /// A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogUniformCandidateSampler'. + /// + /// + /// Optional argument + /// Number of true labels per context. + /// + /// + /// Optional argument + /// Number of candidates to randomly sample. + /// + /// + /// Optional argument + /// If unique is true, we sample with rejection, so that all sampled + /// candidates in a batch are unique. This requires some approximation to + /// estimate the post-rejection sampling probabilities. + /// + /// + /// Optional argument + /// The sampler will sample integers from the interval [0, range_max). + /// + /// + /// If either seed or seed2 is set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sampled_candidates : A vector of length num_sampled, in which each element is + /// the ID of a sampled candidate. + /// true_expected_count : A batch_size * num_true matrix, representing + /// the number of times each candidate is expected to occur in a batch + /// of sampled candidates. If unique=true, then this is a probability. + /// sampled_expected_count : A vector of length num_sampled, for each sampled + /// candidate representing the number of times the candidate is expected + /// to occur in a batch of sampled candidates. If unique=true, then this is a + /// probability. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// See explanations of candidate sampling and the data formats at + /// go/candidate-sampling. + /// + /// For each batch, this op picks a single set of sampled candidate labels. + /// + /// The advantages of sampling candidates per-batch are simplicity and the + /// possibility of efficient dense matrix multiplication. The disadvantage is that + /// the sampled candidates must be chosen independently of the context and of the + /// true labels. + /// + public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) log_uniform_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int range_max, int? seed = null, int?
seed2 = null, string name = "LogUniformCandidateSampler") + { + var dict = new Dictionary(); + dict["true_classes"] = true_classes; + dict["num_true"] = num_true; + dict["num_sampled"] = num_sampled; + dict["unique"] = unique; + dict["range_max"] = range_max; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("LogUniformCandidateSampler", name: name, keywords: dict); + int _idx = 0; + var sampled_candidates = op.outputs[_idx++]; + var true_expected_count = op.outputs[_idx++]; + var sampled_expected_count = op.outputs[_idx++]; + return (sampled_candidates, true_expected_count, sampled_expected_count); + } + + /// + /// Returns the truth value of x AND y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogicalAnd'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: LogicalAnd supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor logical_and (Tensor x, Tensor y, string name = "LogicalAnd") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("LogicalAnd", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of NOT x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogicalNot'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor logical_not (Tensor x, string name = "LogicalNot") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("LogicalNot", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of x OR y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LogicalOr'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: LogicalOr supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor logical_or (Tensor x, Tensor y, string name = "LogicalOr") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("LogicalOr", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs all keys and values in the table. + /// + /// + /// Handle to the table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableExport'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Returns a tuple with multiple values, as follows: + /// keys : Vector of all keys present in the table. + /// values : Tensor of all values in the table. Indexed in parallel with keys. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
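+ ///
+ /// Example (a sketch, not part of the generated op definition; it assumes
+ /// table_handle comes from a hash-table creation op elsewhere in the graph,
+ /// and that a session is available to evaluate the outputs):
+ ///
+ /// var (keys, values) = gen_ops.lookup_table_export(table_handle,
+ /// TF_DataType.TF_STRING, TF_DataType.TF_INT64);
+ /// // Evaluating keys and values in a session yields parallel vectors of the table contents.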
+ /// + public static (Tensor keys, Tensor values) lookup_table_export (Tensor table_handle, TF_DataType Tkeys, TF_DataType Tvalues, string name = "LookupTableExport") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["Tkeys"] = Tkeys; + dict["Tvalues"] = Tvalues; + var op = _op_def_lib._apply_op_helper("LookupTableExport", name: name, keywords: dict); + int _idx = 0; + var keys = op.outputs[_idx++]; + var values = op.outputs[_idx++]; + return (keys, values); + } + + /// + /// Outputs all keys and values in the table. + /// + /// + /// Handle to the table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableExportV2'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Returns a tuple with multiple values, as follows: + /// keys : Vector of all keys present in the table. + /// values : Tensor of all values in the table. Indexed in parallel with keys. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + public static (Tensor keys, Tensor values) lookup_table_export_v2 (Tensor table_handle, TF_DataType Tkeys, TF_DataType Tvalues, string name = "LookupTableExportV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["Tkeys"] = Tkeys; + dict["Tvalues"] = Tvalues; + var op = _op_def_lib._apply_op_helper("LookupTableExportV2", name: name, keywords: dict); + int _idx = 0; + var keys = op.outputs[_idx++]; + var values = op.outputs[_idx++]; + return (keys, values); + } + + /// + /// Looks up keys in a table, outputs the corresponding values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableFind'. + /// + /// + /// Same shape as keys. Values found in the table, or default_values + /// for missing keys. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The output values is of the type of the table values. + /// + /// The scalar default_value is the value output for keys not present in the + /// table. It must also be of the same type as the table values. + /// + public static Tensor lookup_table_find (Tensor table_handle, Tensor keys, Tensor default_value, string name = "LookupTableFind") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["default_value"] = default_value; + var op = _op_def_lib._apply_op_helper("LookupTableFind", name: name, keywords: dict); + return op.output; + } + + /// + /// Looks up keys in a table, outputs the corresponding values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableFindV2'. + /// + /// + /// Same shape as keys. Values found in the table, or default_values + /// for missing keys. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The output values is of the type of the table values.
+ /// + /// The scalar default_value is the value output for keys not present in the + /// table. It must also be of the same type as the table values. + /// + public static Tensor lookup_table_find_v2 (Tensor table_handle, Tensor keys, Tensor default_value, string name = "LookupTableFindV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["default_value"] = default_value; + var op = _op_def_lib._apply_op_helper("LookupTableFindV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Replaces the contents of the table with the specified keys and values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// Values to associate with keys. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableImport'. + /// + /// + /// Returns the description of the operation + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The tensor values must be of the type of the table values. + /// + public static Operation lookup_table_import (Tensor table_handle, Tensor keys, Tensor values, string name = "LookupTableImport") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("LookupTableImport", name: name, keywords: dict); + return op; + } + + /// + /// Replaces the contents of the table with the specified keys and values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// Values to associate with keys. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableImportV2'. + /// + /// + /// Returns the description of the operation + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The tensor values must be of the type of the table values. + /// + public static Operation lookup_table_import_v2 (Tensor table_handle, Tensor keys, Tensor values, string name = "LookupTableImportV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("LookupTableImportV2", name: name, keywords: dict); + return op; + } + + /// + /// Updates the table to associate keys with values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// Values to associate with keys. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableInsert'. + /// + /// + /// Returns the description of the operation + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The tensor values must be of the type of the table values. + /// + public static Operation lookup_table_insert (Tensor table_handle, Tensor keys, Tensor values, string name = "LookupTableInsert") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("LookupTableInsert", name: name, keywords: dict); + return op; + } + + /// + /// Updates the table to associate keys with values. + /// + /// + /// Handle to the table. + /// + /// + /// Any shape. Keys to look up. + /// + /// + /// Values to associate with keys.
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableInsertV2'. + /// + /// + /// Returns the description of the operation + /// + /// + /// The tensor keys must be of the same type as the keys of the table. + /// The tensor values must be of the type of the table values. + /// + public static Operation lookup_table_insert_v2 (Tensor table_handle, Tensor keys, Tensor values, string name = "LookupTableInsertV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + dict["keys"] = keys; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("LookupTableInsertV2", name: name, keywords: dict); + return op; + } + + /// + /// Computes the number of elements in the given table. + /// + /// + /// Handle to the table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableSize'. + /// + /// + /// Scalar that contains number of elements in the table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor lookup_table_size (Tensor table_handle, string name = "LookupTableSize") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + var op = _op_def_lib._apply_op_helper("LookupTableSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the number of elements in the given table. + /// + /// + /// Handle to the table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableSizeV2'. + /// + /// + /// Scalar that contains number of elements in the table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor lookup_table_size_v2 (Tensor table_handle, string name = "LookupTableSizeV2") + { + var dict = new Dictionary(); + dict["table_handle"] = table_handle; + var op = _op_def_lib._apply_op_helper("LookupTableSizeV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Forwards the input to the output. + /// + /// + /// A boolean scalar, representing the branch predicate of the Switch op. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'LoopCond'. + /// + /// + /// The same tensor as input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operator represents the loop termination condition used by the + /// "pivot" switches of a loop. + /// + public static Tensor loop_cond (Tensor input, string name = "LoopCond") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("LoopCond", name: name, keywords: dict); + return op.output; + } + + /// + /// Makes a new iterator from the given dataset and stores it in iterator. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MakeIterator'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation may be executed multiple times. Each execution will reset the + /// iterator in iterator to the first element of dataset. 
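+ ///
+ /// Example (a sketch, not part of the generated op definition; it assumes dataset and
+ /// iterator handles were created by separate dataset/iterator ops, and that the
+ /// returned Operation is run in a session):
+ ///
+ /// var init_op = gen_ops.make_iterator(dataset, iterator);
+ /// // Running init_op points iterator at the first element of dataset;
+ /// // running it again restarts iteration from the beginning.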
+ /// + public static Operation make_iterator (Tensor dataset, Tensor iterator, string name = "MakeIterator") + { + var dict = new Dictionary(); + dict["dataset"] = dataset; + dict["iterator"] = iterator; + var op = _op_def_lib._apply_op_helper("MakeIterator", name: name, keywords: dict); + return op; + } + + /// + /// Op removes all elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapClear'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns the description of the operation + /// + public static Operation map_clear (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapClear") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapClear", name: name, keywords: dict); + return op; + } + + /// + /// Op returns the number of incomplete elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapIncompleteSize'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor map_incomplete_size (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapIncompleteSize") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapIncompleteSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Op peeks at the values at the specified key. If the + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapPeek'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// underlying container does not contain this key + /// this op will block until it does. + /// + public static Tensor[] map_peek (Tensor key, Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? 
memory_limit = null, string container = null, string shared_name = null, string name = "MapPeek") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapPeek", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Op returns the number of elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapSize'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor map_size (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapSize") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Stage (key, values) in the underlying container which behaves like a hashtable. + /// + /// + /// int64 + /// + /// + /// + /// + /// a list of tensors + /// dtypes A list of data types that inserted values should adhere to. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapStage'. + /// + /// + /// Optional argument + /// + /// + /// Maximum number of elements in the Staging Area. If > 0, inserts + /// on the container will block when the capacity is reached. + /// + /// + /// + /// + /// If non-empty, this queue is placed in the given container. Otherwise, + /// a default container is used. + /// + /// + /// It is necessary to match this name to the matching Unstage Op. + /// + /// + /// Returns the description of the operation + /// + public static Operation map_stage (Tensor key, Tensor indices, Tensor[] values, TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapStage") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["values"] = values; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapStage", name: name, keywords: dict); + return op; + } + + /// + /// Op removes and returns the values associated with the key + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapUnstage'. 
+ /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// from the underlying container. If the underlying container + /// does not contain this key, the op will block until it does. + /// + public static Tensor[] map_unstage (Tensor key, Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapUnstage") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapUnstage", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Op removes and returns a random (key, value) + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapUnstageNoKey'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// key : + /// values : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// from the underlying container. If the underlying container + /// does not contain elements, the op will block until it does. + /// + public static (Tensor key, Tensor[] values) map_unstage_no_key (Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "MapUnstageNoKey") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MapUnstageNoKey", name: name, keywords: dict); + int _idx = 0; + var key = op.outputs[_idx++]; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (key, values); + } + + /// + /// Multiply the matrix "a" by the matrix "b". + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatMul'. + /// + /// + /// If true, "a" is transposed before multiplication. + /// + /// + /// If true, "b" is transposed before multiplication. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of + /// "a" (after being transposed if transpose_a is true) must match the + /// outer dimension of "b" (after being transposed if transpose_b is + /// true). + /// + /// *Note*: The default kernel implementation for MatMul on GPUs uses + /// cublas.
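+ ///
+ /// Example (a sketch, not part of the generated op definition; it assumes the
+ /// binding's tf.constant helper is in scope for building 2-D constants):
+ ///
+ /// var a = tf.constant(new float[,] { { 1f, 2f }, { 3f, 4f } });
+ /// var b = tf.constant(new float[,] { { 5f, 6f }, { 7f, 8f } });
+ /// var product = gen_ops.mat_mul(a, b); // ==> [[19, 22], [43, 50]]
+ /// var product_bt = gen_ops.mat_mul(a, b, transpose_b: true); // ==> [[17, 23], [39, 53]]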
+ /// + public static Tensor mat_mul (Tensor a, Tensor b, bool? transpose_a = null, bool? transpose_b = null, string name = "MatMul") + { + var dict = new Dictionary(); + dict["a"] = a; + dict["b"] = b; + if (transpose_a.HasValue) + dict["transpose_a"] = transpose_a.Value; + if (transpose_b.HasValue) + dict["transpose_b"] = transpose_b.Value; + var op = _op_def_lib._apply_op_helper("MatMul", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the set of files matching one or more glob patterns. + /// + /// + /// Shell wildcard pattern(s). Scalar or vector of type string. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatchingFiles'. + /// + /// + /// A vector of matching filenames. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that this routine only supports wildcard characters in the + /// basename portion of the pattern, not in the directory portion. + /// Note also that the order of filenames returned can be non-deterministic. + /// + public static Tensor matching_files (Tensor pattern, string name = "MatchingFiles") + { + var dict = new Dictionary(); + dict["pattern"] = pattern; + var op = _op_def_lib._apply_op_helper("MatchingFiles", name: name, keywords: dict); + return op.output; + } + + /// + /// Copy a tensor setting everything outside a central band in each innermost matrix + /// + /// + /// Rank k tensor. + /// + /// + /// 0-D tensor. Number of subdiagonals to keep. If negative, keep entire + /// lower triangle. + /// + /// + /// 0-D tensor. Number of superdiagonals to keep. If negative, keep + /// entire upper triangle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixBandPart'. + /// + /// + /// Rank k tensor of the same shape as input. The extracted banded tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// to zero. + /// + /// The band part is computed as follows: + /// Assume input has k dimensions [I, J, K, ..., M, N], then the output is a + /// tensor with the same shape where + /// + /// band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]. + /// + /// The indicator function + /// + /// in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && + /// (num_upper < 0 || (n-m) <= num_upper). + /// + /// For example: + /// + /// + /// # if 'input' is [[ 0, 1, 2, 3] + /// [-1, 0, 1, 2] + /// [-2, -1, 0, 1] + /// [-3, -2, -1, 0]], + /// + /// tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + /// [-1, 0, 1, 2] + /// [ 0, -1, 0, 1] + /// [ 0, 0, -1, 0]], + /// + /// tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + /// [-1, 0, 1, 0] + /// [-2, -1, 0, 1] + /// [ 0, -2, -1, 0]] + /// + /// + /// Useful special cases: + /// + /// + /// tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. + /// tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. + /// tf.matrix_band_part(input, 0, 0) ==> Diagonal.
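+ ///
+ /// Calling the generated wrapper from C# (a sketch; it assumes num_lower and
+ /// num_upper are built as 0-D int32 constants, e.g. with the binding's
+ /// tf.constant helper):
+ ///
+ /// // Keep one subdiagonal and the entire upper triangle, as in the first example above.
+ /// var banded = gen_ops.matrix_band_part(input, tf.constant(1), tf.constant(-1));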
+ /// + /// + public static Tensor matrix_band_part (Tensor input, Tensor num_lower, Tensor num_upper, string name = "MatrixBandPart") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["num_lower"] = num_lower; + dict["num_upper"] = num_upper; + var op = _op_def_lib._apply_op_helper("MatrixBandPart", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the determinant of one or more square matrices. + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixDeterminant'. + /// + /// + /// Shape is [...]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions + /// form square matrices. The output is a tensor containing the determinants + /// for all input submatrices [..., :, :]. + /// + public static Tensor matrix_determinant (Tensor input, string name = "MatrixDeterminant") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("MatrixDeterminant", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a batched diagonal tensor with a given batched diagonal values. + /// + /// + /// Rank k, where k >= 1. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixDiag'. + /// + /// + /// Rank k+1, with output.shape = diagonal.shape + [diagonal.shape[-1]]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a diagonal, this operation returns a tensor with the diagonal and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume diagonal has k dimensions [I, J, K, ..., N], then the output is a + /// tensor of rank k+1 with dimensions [I, J, K, ..., N, N] where: + /// + /// output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]. + /// + /// For example: + /// + /// + /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// and diagonal.shape = (2, 4) + /// + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// which has shape (2, 4, 4) + /// + /// + public static Tensor matrix_diag (Tensor diagonal, string name = "MatrixDiag") + { + var dict = new Dictionary(); + dict["diagonal"] = diagonal; + var op = _op_def_lib._apply_op_helper("MatrixDiag", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// Rank k tensor where k >= 2. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixDiagPart'. + /// + /// + /// The extracted diagonal(s) having shape + /// diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns a tensor with the diagonal part + /// of the batched input. 
The diagonal part is computed as follows: + /// + /// Assume input has k dimensions [I, J, K, ..., M, N], then the output is a + /// tensor of rank k - 1 with dimensions [I, J, K, ..., min(M, N)] where: + /// + /// diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// + /// # 'input' is [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// and input.shape = (2, 4, 4) + /// + /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// which has shape (2, 4) + /// + /// + public static Tensor matrix_diag_part (Tensor input, string name = "MatrixDiagPart") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("MatrixDiagPart", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated, use python implementation tf.linalg.matrix_exponential. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixExponential'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor matrix_exponential (Tensor input, string name = "MatrixExponential") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("MatrixExponential", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the inverse of one or more square invertible matrices or their + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixInverse'. + /// + /// + /// + /// + /// Shape is [..., M, M]. + /// + /// @compatibility(numpy) + /// Equivalent to np.linalg.inv + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// adjoints (conjugate transposes). + /// + /// The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions + /// form square matrices. The output is a tensor of the same shape as the input + /// containing the inverse for all input submatrices [..., :, :]. + /// + /// The op uses LU decomposition with partial pivoting to compute the inverses. + /// + /// If a matrix is not invertible there is no guarantee what the op does. It + /// may detect the condition and raise an exception or it may simply return a + /// garbage result. + /// + public static Tensor matrix_inverse (Tensor input, bool? adjoint = null, string name = "MatrixInverse") + { + var dict = new Dictionary(); + dict["input"] = input; + if (adjoint.HasValue) + dict["adjoint"] = adjoint.Value; + var op = _op_def_lib._apply_op_helper("MatrixInverse", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the matrix logarithm of one or more square matrices: + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixLogarithm'. + /// + /// + /// Shape is [..., M, M]. + /// + /// @compatibility(scipy) + /// Equivalent to scipy.linalg.logm + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// + /// + /// + /// \\(log(exp(A)) = A\\) + /// + /// This op is only defined for complex matrices. If A is positive-definite and + /// real, then casting to a complex matrix, taking the logarithm and casting back + /// to a real matrix will give the correct result. + /// + /// This function computes the matrix logarithm using the Schur-Parlett algorithm. + /// Details of the algorithm can be found in Section 11.6.2 of: + /// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. + /// ISBN 978-0-898716-46-7. + /// + /// The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions + /// form square matrices. The output is a tensor of the same shape as the input + /// containing the logarithm for all input submatrices [..., :, :]. + /// + public static Tensor matrix_logarithm (Tensor input, string name = "MatrixLogarithm") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("MatrixLogarithm", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// Rank k+1, where k >= 1. + /// + /// + /// Rank k, where k >= 1. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixSetDiag'. + /// + /// + /// Rank k+1, with output.shape = input.shape. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given input and diagonal, this operation returns a tensor with the + /// same shape and values as input, except for the main diagonal of the + /// innermost matrices. These will be overwritten by the values in diagonal. + /// + /// The output is computed as follows: + /// + /// Assume input has k+1 dimensions [I, J, K, ..., M, N] and diagonal has + /// k dimensions [I, J, K, ..., min(M, N)]. Then the output is a + /// tensor of rank k+1 with dimensions [I, J, K, ..., M, N] where: + /// + /// * output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n] for m == n. + /// * output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n] for m != n. + /// + public static Tensor matrix_set_diag (Tensor input, Tensor diagonal, string name = "MatrixSetDiag") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["diagonal"] = diagonal; + var op = _op_def_lib._apply_op_helper("MatrixSetDiag", name: name, keywords: dict); + return op.output; + } + + /// + /// Solves systems of linear equations. + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// Shape is [..., M, K]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixSolve'. + /// + /// + /// Boolean indicating whether to solve with matrix or its (block-wise) + /// adjoint. + /// + /// + /// Shape is [..., M, K]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Matrix is a tensor of shape [..., M, M] whose inner-most 2 dimensions + /// form square matrices. Rhs is a tensor of shape [..., M, K]. The output is + /// a tensor of shape [..., M, K]. If adjoint is False then each output matrix + /// satisfies matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]. + /// If adjoint is True then each output matrix satisfies + /// adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]. + /// + public static Tensor matrix_solve (Tensor matrix, Tensor rhs, bool?
adjoint = null, string name = "MatrixSolve") + { + var dict = new Dictionary(); + dict["matrix"] = matrix; + dict["rhs"] = rhs; + if (adjoint.HasValue) + dict["adjoint"] = adjoint.Value; + var op = _op_def_lib._apply_op_helper("MatrixSolve", name: name, keywords: dict); + return op.output; + } + + /// + /// Solves one or more linear least-squares problems. + /// + /// + /// Shape is [..., M, N]. + /// + /// + /// Shape is [..., M, K]. + /// + /// + /// Scalar tensor. + /// + /// @compatibility(numpy) + /// Equivalent to np.linalg.lstsq + /// @end_compatibility + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixSolveLs'. + /// + /// + /// + /// + /// Shape is [..., N, K]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// matrix is a tensor of shape [..., M, N] whose inner-most 2 dimensions + /// form real or complex matrices of size [M, N]. Rhs is a tensor of the same + /// type as matrix and shape [..., M, K]. + /// The output is a tensor shape [..., N, K] where each output matrix solves + /// each of the equations + /// matrix[..., :, :] * output[..., :, :] = rhs[..., :, :] + /// in the least squares sense. + /// + /// We use the following notation for (complex) matrix and right-hand sides + /// in the batch: + /// + /// matrix=\\(A \in \mathbb{C}^{m \times n}\\), + /// rhs=\\(B \in \mathbb{C}^{m \times k}\\), + /// output=\\(X \in \mathbb{C}^{n \times k}\\), + /// l2_regularizer=\\(\lambda \in \mathbb{R}\\). + /// + /// If fast is True, then the solution is computed by solving the normal + /// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then + /// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares + /// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). + /// If \\(m \lt n\\) then output is computed as + /// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the + /// minimum-norm solution to the under-determined linear system, i.e. + /// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), + /// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable + /// when \\(A\\) is numerically full rank and has a condition number + /// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is + /// sufficiently large. + /// + /// If fast is False an algorithm based on the numerically robust complete + /// orthogonal decomposition is used. This computes the minimum-norm + /// least-squares solution, even when \\(A\\) is rank deficient. This path is + /// typically 6-7 times slower than the fast path. If fast is False then + /// l2_regularizer is ignored. + /// + public static Tensor matrix_solve_ls (Tensor matrix, Tensor rhs, Tensor l2_regularizer, bool? fast = null, string name = "MatrixSolveLs") + { + var dict = new Dictionary(); + dict["matrix"] = matrix; + dict["rhs"] = rhs; + dict["l2_regularizer"] = l2_regularizer; + if (fast.HasValue) + dict["fast"] = fast.Value; + var op = _op_def_lib._apply_op_helper("MatrixSolveLs", name: name, keywords: dict); + return op.output; + } + + /// + /// Solves systems of linear equations with upper or lower triangular matrices by + /// + /// + /// Shape is [..., M, M]. + /// + /// + /// Shape is [..., M, K]. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MatrixTriangularSolve'. + /// + /// + /// Boolean indicating whether the innermost matrices in matrix are + /// lower or upper triangular. + /// + /// + /// Boolean indicating whether to solve with matrix or its (block-wise) + /// adjoint. + /// + /// @compatibility(scipy) + /// Equivalent to scipy.linalg.solve_triangular + /// @end_compatibility + /// + /// + /// Shape is [..., M, K]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// backsubstitution. + /// + /// matrix is a tensor of shape [..., M, M] whose inner-most 2 dimensions form + /// square matrices. If lower is True then the strictly upper triangular part + /// of each inner-most matrix is assumed to be zero and not accessed. + /// If lower is False then the strictly lower triangular part of each inner-most + /// matrix is assumed to be zero and not accessed. + /// rhs is a tensor of shape [..., M, K]. + /// + /// The output is a tensor of shape [..., M, K]. If adjoint is + /// False then the innermost matrices in output satisfy matrix equations + /// matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]. + /// If adjoint is True then the innermost matrices in output satisfy + /// matrix equations adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]. + /// + public static Tensor matrix_triangular_solve (Tensor matrix, Tensor rhs, bool? lower = null, bool? adjoint = null, string name = "MatrixTriangularSolve") + { + var dict = new Dictionary(); + dict["matrix"] = matrix; + dict["rhs"] = rhs; + if (lower.HasValue) + dict["lower"] = lower.Value; + if (adjoint.HasValue) + dict["adjoint"] = adjoint.Value; + var op = _op_def_lib._apply_op_helper("MatrixTriangularSolve", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the maximum of elements across dimensions of a tensor. + /// + /// + /// The tensor to reduce. + /// + /// + /// The dimensions to reduce. Must be in the range + /// [-rank(input), rank(input)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Max'. + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// The reduced tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Reduces input along the dimensions given in axis. Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// axis. If keep_dims is true, the reduced dimensions are + /// retained with length 1. + /// + public static Tensor max (Tensor input, Tensor reduction_indices, bool? keep_dims = null, string name = "Max") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["reduction_indices"] = reduction_indices; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + var op = _op_def_lib._apply_op_helper("Max", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs max pooling on the input. + /// + /// + /// 4-D input to pool over. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPool'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor.
+ /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// The max pooled output tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool (Tensor input, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPool") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPool", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs 3D max pooling on the input. + /// + /// + /// Shape [batch, depth, rows, cols, channels] tensor to pool over. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPool3D'. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have ksize[0] = ksize[4] = 1. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of input. Must have strides[0] = strides[4] = 1. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// The max pooled output tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool3d (Tensor input, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPool3D") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPool3D", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of max pooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// Output backprop of shape [batch, depth, rows, cols, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPool3DGrad'. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have ksize[0] = ksize[4] = 1. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of input. Must have strides[0] = strides[4] = 1. 
+ /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool3d_grad (Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPool3DGrad") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPool3DGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// Output backprop of shape [batch, depth, rows, cols, channels]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPool3DGradGrad'. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have ksize[0] = ksize[4] = 1. + /// + /// + /// Optional argument + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of input. Must have strides[0] = strides[4] = 1. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// Gradients of gradients w.r.t. the input to max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool3d_grad_grad (Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPool3DGradGrad") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPool3DGradGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// 4-D. Gradients w.r.t. the output of max_pool. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGrad'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor. 
+ /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// Gradients w.r.t. the input to max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad (Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPoolGrad") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// 4-D. Gradients of gradients w.r.t. the input of max_pool. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGradGrad'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// Gradients of gradients w.r.t. the input to max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad_grad (Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = null, string name = "MaxPoolGradGrad") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPoolGradGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// 4-D. Gradients of gradients w.r.t. the input of max_pool. + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGradGradV2'. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// Gradients of gradients w.r.t. the input to max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad_grad_v2 (Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = null, string name = "MaxPoolGradGradV2") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPoolGradGradV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// The original input. + /// + /// + /// 4-D with shape [batch, height, width, channels]. Gradients w.r.t. the + /// input of max_pool. + /// + /// + /// The indices of the maximum values chosen for each output of max_pool. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGradGradWithArgmax'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Gradients of gradients w.r.t. the input of max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad_grad_with_argmax (Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, string name = "MaxPoolGradGradWithArgmax") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["grad"] = grad; + dict["argmax"] = argmax; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("MaxPoolGradGradWithArgmax", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// The original input tensor. + /// + /// + /// The original output tensor. + /// + /// + /// 4-D. Gradients w.r.t. the output of max_pool. + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGradV2'. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. 
With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// Gradients w.r.t. the input to max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad_v2 (Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = null, string name = "MaxPoolGradV2") + { + var dict = new Dictionary(); + dict["orig_input"] = orig_input; + dict["orig_output"] = orig_output; + dict["grad"] = grad; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("MaxPoolGradV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// The original input. + /// + /// + /// 4-D with shape [batch, height, width, channels]. Gradients w.r.t. the + /// output of max_pool. + /// + /// + /// The indices of the maximum values chosen for each output of max_pool. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolGradWithArgmax'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Gradients w.r.t. the input of max_pool. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor max_pool_grad_with_argmax (Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, string name = "MaxPoolGradWithArgmax") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["grad"] = grad; + dict["argmax"] = argmax; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("MaxPoolGradWithArgmax", name: name, keywords: dict); + return op.output; + } + + /// + /// Performs max pooling on the input. + /// + /// + /// 4-D input to pool over. + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolV2'. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// The max pooled output tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
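+ ///
+ /// A minimal usage sketch (the images tensor and the tf.constant helper are
+ /// assumed to exist in the surrounding graph code):
+ ///
+ /// var pooled = gen_ops.max_pool_v2(images,
+ /// tf.constant(new[] {1, 2, 2, 1}), // ksize
+ /// tf.constant(new[] {1, 2, 2, 1}), // strides
+ /// "VALID");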
+ ///
+ public static Tensor max_pool_v2 (Tensor input, Tensor ksize, Tensor strides, string padding, string data_format = null, string name = "MaxPoolV2")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["ksize"] = ksize;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (data_format != null)
+ dict["data_format"] = data_format;
+ var op = _op_def_lib._apply_op_helper("MaxPoolV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Performs max pooling on the input and outputs both max values and indices.
+ ///
+ ///
+ /// 4-D with shape [batch, height, width, channels]. Input to pool over.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MaxPoolWithArgmax'.
+ ///
+ ///
+ /// Optional argument
+ /// The size of the window for each dimension of the input tensor.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the
+ /// input tensor.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output : The max pooled output tensor.
+ /// argmax : 4-D. The flattened indices of the max values chosen for each output.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// The indices in argmax are flattened, so that a maximum value at position
+ /// [b, y, x, c] becomes flattened index
+ /// ((b * height + y) * width + x) * channels + c.
+ ///
+ /// The indices returned are always in [0, height) x [0, width) before flattening,
+ /// even if padding is involved and the mathematically correct answer is outside
+ /// (either negative or too large). This is a bug, but fixing it is difficult to do
+ /// in a safe backwards compatible way, especially due to flattening.
+ ///
+ public static (Tensor output, Tensor argmax) max_pool_with_argmax (Tensor input, int[] ksize, int[] strides, string padding, TF_DataType? Targmax = null, string name = "MaxPoolWithArgmax")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["ksize"] = ksize;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (Targmax.HasValue)
+ dict["Targmax"] = Targmax.Value;
+ var op = _op_def_lib._apply_op_helper("MaxPoolWithArgmax", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var argmax = op.outputs[_idx++];
+ return (output, argmax);
+ }
+
+ ///
+ /// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Maximum'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// *NOTE*: Maximum supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ ///
+ public static Tensor maximum (Tensor x, Tensor y, string name = "Maximum")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("Maximum", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the mean of elements across dimensions of a tensor.
+ ///
+ ///
+ /// The tensor to reduce.
+ ///
+ ///
+ /// The dimensions to reduce. Must be in the range
+ /// [-rank(input), rank(input)).
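+ ///
+ /// A sketch: averaging a [batch, features] tensor over its batch dimension
+ /// (tensor names hypothetical; a tf.constant-style helper is assumed):
+ ///
+ /// var avg = gen_ops.mean(features, tf.constant(new[] {0}), keep_dims: false);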
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Mean'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// The reduced tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Reduces input along the dimensions given in axis. Unless
+ /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in
+ /// axis. If keep_dims is true, the reduced dimensions are
+ /// retained with length 1.
+ ///
+ public static Tensor mean (Tensor input, Tensor reduction_indices, bool? keep_dims = null, string name = "Mean")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["reduction_indices"] = reduction_indices;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("Mean", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Forwards the value of an available tensor from inputs to output.
+ ///
+ ///
+ /// The input tensors, exactly one of which will become available.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Merge'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output : Will be set to the available input tensor.
+ /// value_index : The index of the chosen input tensor in inputs.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Merge waits for at least one of the tensors in inputs to become available.
+ /// It is usually combined with Switch to implement branching.
+ ///
+ /// Merge forwards the first tensor to become available to output, and sets
+ /// value_index to its index in inputs.
+ ///
+ public static (Tensor output, Tensor value_index) merge (Tensor[] inputs, string name = "Merge")
+ {
+ var dict = new Dictionary();
+ dict["inputs"] = inputs;
+ var op = _op_def_lib._apply_op_helper("Merge", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var value_index = op.outputs[_idx++];
+ return (output, value_index);
+ }
+
+ ///
+ /// Merges summaries.
+ ///
+ ///
+ /// Can be of any shape. Each must contain serialized Summary protocol
+ /// buffers.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MergeSummary'.
+ ///
+ ///
+ /// Scalar. Serialized Summary protocol buffer.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This op creates a
+ /// [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ /// protocol buffer that contains the union of all the values in the input
+ /// summaries.
+ ///
+ /// When the Op is run, it reports an InvalidArgument error if multiple values
+ /// in the summaries to merge use the same tag.
+ ///
+ public static Tensor merge_summary (Tensor[] inputs, string name = "MergeSummary")
+ {
+ var dict = new Dictionary();
+ dict["inputs"] = inputs;
+ var op = _op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// V2 format specific: merges the metadata files of sharded checkpoints. The
+ ///
+ ///
+ /// prefixes of V2 checkpoints to merge.
+ ///
+ ///
+ /// scalar. The desired final prefix. Allowed to be the same
+ /// as one of the checkpoint_prefixes.
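+ ///
+ /// A hedged wiring sketch (the two string tensors are assumed to be built
+ /// elsewhere, e.g. as constants holding checkpoint prefixes):
+ ///
+ /// var merge_op = gen_ops.merge_v2checkpoints(prefixes, destination,
+ /// delete_old_dirs: true);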
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MergeV2Checkpoints'.
+ ///
+ ///
+ /// see above.
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// result is one logical checkpoint, with one physical metadata file and renamed
+ /// data files.
+ ///
+ /// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+ ///
+ /// If delete_old_dirs is true, attempts to delete recursively the dirname of each
+ /// path in the input checkpoint_prefixes. This is useful when those paths are
+ /// non-user-facing temporary locations.
+ ///
+ public static Operation merge_v2checkpoints (Tensor checkpoint_prefixes, Tensor destination_prefix, bool? delete_old_dirs = null, string name = "MergeV2Checkpoints")
+ {
+ var dict = new Dictionary();
+ dict["checkpoint_prefixes"] = checkpoint_prefixes;
+ dict["destination_prefix"] = destination_prefix;
+ if (delete_old_dirs.HasValue)
+ dict["delete_old_dirs"] = delete_old_dirs.Value;
+ var op = _op_def_lib._apply_op_helper("MergeV2Checkpoints", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Transforms a spectrogram into a form that's useful for speech recognition.
+ ///
+ ///
+ /// Typically produced by the Spectrogram op, with magnitude_squared
+ /// set to true.
+ ///
+ ///
+ /// How many samples per second the source audio used.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Mfcc'.
+ ///
+ ///
+ /// The highest frequency to use when calculating the
+ /// cepstrum.
+ ///
+ ///
+ /// The lowest frequency to use when calculating the
+ /// cepstrum.
+ ///
+ ///
+ /// Resolution of the Mel bank used internally.
+ ///
+ ///
+ /// How many output channels to produce per time slice.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
+ /// been effective as an input feature for machine learning. They are created by
+ /// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+ /// higher frequencies that are less significant to the human ear. They have a long
+ /// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+ /// is a good resource to learn more.
+ ///
+ public static Tensor mfcc (Tensor spectrogram, Tensor sample_rate, float? upper_frequency_limit = null, float? lower_frequency_limit = null, int? filterbank_channel_count = null, int? dct_coefficient_count = null, string name = "Mfcc")
+ {
+ var dict = new Dictionary();
+ dict["spectrogram"] = spectrogram;
+ dict["sample_rate"] = sample_rate;
+ if (upper_frequency_limit.HasValue)
+ dict["upper_frequency_limit"] = upper_frequency_limit.Value;
+ if (lower_frequency_limit.HasValue)
+ dict["lower_frequency_limit"] = lower_frequency_limit.Value;
+ if (filterbank_channel_count.HasValue)
+ dict["filterbank_channel_count"] = filterbank_channel_count.Value;
+ if (dct_coefficient_count.HasValue)
+ dict["dct_coefficient_count"] = dct_coefficient_count.Value;
+ var op = _op_def_lib._apply_op_helper("Mfcc", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the minimum of elements across dimensions of a tensor.
+ ///
+ ///
+ /// The tensor to reduce.
+ ///
+ ///
+ /// The dimensions to reduce. Must be in the range
+ /// [-rank(input), rank(input)).
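+ ///
+ /// A sketch of reducing a matrix over both axes while keeping the reduced
+ /// dimensions (names hypothetical; a tf.constant-style helper is assumed):
+ ///
+ /// var m = gen_ops.min(matrix, tf.constant(new[] {0, 1}), keep_dims: true);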
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Min'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// The reduced tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Reduces input along the dimensions given in axis. Unless
+ /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in
+ /// axis. If keep_dims is true, the reduced dimensions are
+ /// retained with length 1.
+ ///
+ public static Tensor min (Tensor input, Tensor reduction_indices, bool? keep_dims = null, string name = "Min")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["reduction_indices"] = reduction_indices;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("Min", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Minimum'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// *NOTE*: Minimum supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ ///
+ public static Tensor minimum (Tensor x, Tensor y, string name = "Minimum")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("Minimum", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Pads a tensor with mirrored values.
+ ///
+ ///
+ /// The input tensor to be padded.
+ ///
+ ///
+ /// A two-column matrix specifying the padding sizes. The number of
+ /// rows must be the same as the rank of input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MirrorPad'.
+ ///
+ ///
+ /// Optional argument
+ /// Either REFLECT or SYMMETRIC. In reflect mode the padded regions
+ /// do not include the borders, while in symmetric mode the padded regions
+ /// do include the borders. For example, if input is [1, 2, 3] and paddings
+ /// is [0, 2], then the output is [1, 2, 3, 2, 1] in reflect mode, and
+ /// it is [1, 2, 3, 3, 2] in symmetric mode.
+ ///
+ ///
+ /// The padded tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This operation pads an input with mirrored values according to the paddings
+ /// you specify. paddings is an integer tensor with shape [n, 2], where n is
+ /// the rank of input. For each dimension D of input, paddings[D, 0] indicates
+ /// how many values to add before the contents of input in that dimension, and
+ /// paddings[D, 1] indicates how many values to add after the contents of input
+ /// in that dimension. Both paddings[D, 0] and paddings[D, 1] must be no greater
+ /// than input.dim_size(D) (or input.dim_size(D) - 1) if copy_border is true
+ /// (if false, respectively).
+ ///
+ /// The padded size of each dimension D of the output is:
+ ///
+ /// paddings(D, 0) + input.dim_size(D) + paddings(D, 1)
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 't' is [[1, 2, 3], [4, 5, 6]].
+ /// # 'paddings' is [[1, 1], [2, 2]].
+ /// # 'mode' is SYMMETRIC.
+ /// # rank of 't' is 2.
+ /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+ /// [2, 1, 1, 2, 3, 3, 2]
+ /// [5, 4, 4, 5, 6, 6, 5]
+ /// [5, 4, 4, 5, 6, 6, 5]]
+ ///
+ ///
+ public static Tensor mirror_pad (Tensor input, Tensor paddings, string mode, string name = "MirrorPad")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["paddings"] = paddings;
+ dict["mode"] = mode;
+ var op = _op_def_lib._apply_op_helper("MirrorPad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Gradient op for MirrorPad op. This op folds a mirror-padded tensor.
+ ///
+ ///
+ /// The input tensor to be folded.
+ ///
+ ///
+ /// A two-column matrix specifying the padding sizes. The number of
+ /// rows must be the same as the rank of input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MirrorPadGrad'.
+ ///
+ ///
+ /// Optional argument
+ /// The mode used in the MirrorPad op.
+ ///
+ ///
+ /// The folded tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This operation folds the padded areas of input by MirrorPad according to the
+ /// paddings you specify. paddings must be the same as paddings argument
+ /// given to the corresponding MirrorPad op.
+ ///
+ /// The folded size of each dimension D of the output is:
+ ///
+ /// input.dim_size(D) - paddings(D, 0) - paddings(D, 1)
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+ /// # 'paddings' is [[0, 1], [0, 1]].
+ /// # 'mode' is SYMMETRIC.
+ /// # rank of 't' is 2.
+ /// pad(t, paddings) ==> [[ 1, 5]
+ /// [11, 28]]
+ ///
+ ///
+ public static Tensor mirror_pad_grad (Tensor input, Tensor paddings, string mode, string name = "MirrorPadGrad")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["paddings"] = paddings;
+ dict["mode"] = mode;
+ var op = _op_def_lib._apply_op_helper("MirrorPadGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns element-wise remainder of division. This emulates C semantics in that
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Mod'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// the result here is consistent with a truncating divide. E.g.
+ /// tf.truncatediv(x, y) * y + truncate_mod(x, y) = x.
+ ///
+ /// *NOTE*: Mod supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ ///
+ public static Tensor mod (Tensor x, Tensor y, string name = "Mod")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("Mod", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns x * y element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Mul'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// *NOTE*: Multiply supports broadcasting.
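+ ///
+ /// For example (a sketch), multiplying a [3, 1] tensor by a [1, 4] tensor
+ /// broadcasts both operands and yields a [3, 4] result:
+ ///
+ /// var z = gen_ops.mul(x, y); // x: [3, 1], y: [1, 4] => z: [3, 4]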
+ /// More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ ///
+ public static Tensor mul (Tensor x, Tensor y, string name = "Mul")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("Mul", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Draws samples from a multinomial distribution.
+ ///
+ ///
+ /// 2-D Tensor with shape [batch_size, num_classes]. Each slice [i, :]
+ /// represents the unnormalized log probabilities for all classes.
+ ///
+ ///
+ /// 0-D. Number of independent samples to draw for each row slice.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Multinomial'.
+ ///
+ ///
+ /// If either seed or seed2 is set to be non-zero, the internal random number
+ /// generator is seeded by the given seed. Otherwise, a random seed is used.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ ///
+ ///
+ /// 2-D Tensor with shape [batch_size, num_samples]. Each slice [i, :]
+ /// contains the drawn class labels with range [0, num_classes).
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor multinomial (Tensor logits, Tensor num_samples, int? seed = null, int? seed2 = null, TF_DataType? output_dtype = null, string name = "Multinomial")
+ {
+ var dict = new Dictionary();
+ dict["logits"] = logits;
+ dict["num_samples"] = num_samples;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ if (output_dtype.HasValue)
+ dict["output_dtype"] = output_dtype.Value;
+ var op = _op_def_lib._apply_op_helper("Multinomial", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates an empty hash table that uses tensors as the backing store.
+ ///
+ ///
+ /// The key used to represent empty key buckets internally. Must not
+ /// be used in insert or lookup operations.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableDenseHashTable'.
+ ///
+ ///
+ /// Optional argument
+ /// Type of the table values.
+ ///
+ ///
+ /// If non-empty, this table is placed in the given container.
+ /// Otherwise, a default container is used.
+ ///
+ ///
+ /// If non-empty, this table is shared under the given name across
+ /// multiple sessions.
+ ///
+ ///
+ ///
+ ///
+ /// The shape of each value.
+ ///
+ ///
+ /// The initial number of hash table buckets. Must be a power
+ /// of 2.
+ ///
+ ///
+ /// The maximum ratio between number of entries and number of
+ /// buckets before growing the table. Must be between 0 and 1.
+ ///
+ ///
+ /// Handle to a table.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// It uses "open addressing" with quadratic reprobing to resolve
+ /// collisions.
+ ///
+ /// This op creates a mutable hash table, specifying the type of its keys and
+ /// values. Each value must be a scalar. Data can be inserted into the table using
+ /// the insert operations. It does not support the initialization operation.
+ ///
+ public static Tensor mutable_dense_hash_table (Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTable")
+ {
+ var dict = new Dictionary();
+ dict["empty_key"] = empty_key;
+ dict["value_dtype"] = value_dtype;
+ if (container != null)
+ dict["container"] = container;
+ if (shared_name != null)
+ dict["shared_name"] = shared_name;
+ if (use_node_name_sharing.HasValue)
+ dict["use_node_name_sharing"] = use_node_name_sharing.Value;
+ if (value_shape != null)
+ dict["value_shape"] = value_shape;
+ if (initial_num_buckets.HasValue)
+ dict["initial_num_buckets"] = initial_num_buckets.Value;
+ if (max_load_factor.HasValue)
+ dict["max_load_factor"] = max_load_factor.Value;
+ var op = _op_def_lib._apply_op_helper("MutableDenseHashTable", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates an empty hash table that uses tensors as the backing store.
+ ///
+ ///
+ /// The key used to represent empty key buckets internally. Must not
+ /// be used in insert or lookup operations.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableDenseHashTableV2'.
+ ///
+ ///
+ /// Optional argument
+ /// Type of the table values.
+ ///
+ ///
+ /// If non-empty, this table is placed in the given container.
+ /// Otherwise, a default container is used.
+ ///
+ ///
+ /// If non-empty, this table is shared under the given name across
+ /// multiple sessions.
+ ///
+ ///
+ ///
+ ///
+ /// The shape of each value.
+ ///
+ ///
+ /// The initial number of hash table buckets. Must be a power
+ /// of 2.
+ ///
+ ///
+ /// The maximum ratio between number of entries and number of
+ /// buckets before growing the table. Must be between 0 and 1.
+ ///
+ ///
+ /// Handle to a table.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// It uses "open addressing" with quadratic reprobing to resolve
+ /// collisions.
+ ///
+ /// This op creates a mutable hash table, specifying the type of its keys and
+ /// values. Each value must be a scalar. Data can be inserted into the table using
+ /// the insert operations. It does not support the initialization operation.
+ ///
+ public static Tensor mutable_dense_hash_table_v2 (Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTableV2")
+ {
+ var dict = new Dictionary();
+ dict["empty_key"] = empty_key;
+ dict["value_dtype"] = value_dtype;
+ if (container != null)
+ dict["container"] = container;
+ if (shared_name != null)
+ dict["shared_name"] = shared_name;
+ if (use_node_name_sharing.HasValue)
+ dict["use_node_name_sharing"] = use_node_name_sharing.Value;
+ if (value_shape != null)
+ dict["value_shape"] = value_shape;
+ if (initial_num_buckets.HasValue)
+ dict["initial_num_buckets"] = initial_num_buckets.Value;
+ if (max_load_factor.HasValue)
+ dict["max_load_factor"] = max_load_factor.Value;
+ var op = _op_def_lib._apply_op_helper("MutableDenseHashTableV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates an empty hash table.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableHashTable'.
+ ///
+ ///
+ /// Optional argument
+ /// Type of the table keys.
+ ///
+ ///
+ /// Optional argument
+ /// Type of the table values.
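+ ///
+ /// A minimal creation sketch (the enum member names follow the binding's
+ /// TF_DataType and are an assumption here):
+ ///
+ /// var table = gen_ops.mutable_hash_table(TF_DataType.TF_STRING,
+ /// TF_DataType.TF_INT64,
+ /// shared_name: "shared_lookup_table");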
+ /// + /// + /// If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// If true and shared_name is empty, the table is shared + /// using the node name. + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a mutable hash table, specifying the type of its keys and + /// values. Each value must be a scalar. Data can be inserted into the table using + /// the insert operations. It does not support the initialization operation. + /// + public static Tensor mutable_hash_table (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, string name = "MutableHashTable") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + var op = _op_def_lib._apply_op_helper("MutableHashTable", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates an empty hash table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableHashTableOfTensors'. + /// + /// + /// Optional argument + /// Type of the table keys. + /// + /// + /// Optional argument + /// Type of the table values. + /// + /// + /// If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// + /// + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a mutable hash table, specifying the type of its keys and + /// values. Each value must be a vector. Data can be inserted into the table using + /// the insert operations. It does not support the initialization operation. + /// + public static Tensor mutable_hash_table_of_tensors (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, string name = "MutableHashTableOfTensors") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + if (value_shape != null) + dict["value_shape"] = value_shape; + var op = _op_def_lib._apply_op_helper("MutableHashTableOfTensors", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates an empty hash table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableHashTableOfTensorsV2'. + /// + /// + /// Optional argument + /// Type of the table keys. + /// + /// + /// Optional argument + /// Type of the table values. + /// + /// + /// If non-empty, this table is placed in the given container. 
+ /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// + /// + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a mutable hash table, specifying the type of its keys and + /// values. Each value must be a vector. Data can be inserted into the table using + /// the insert operations. It does not support the initialization operation. + /// + public static Tensor mutable_hash_table_of_tensors_v2 (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, string name = "MutableHashTableOfTensorsV2") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + if (value_shape != null) + dict["value_shape"] = value_shape; + var op = _op_def_lib._apply_op_helper("MutableHashTableOfTensorsV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates an empty hash table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutableHashTableV2'. + /// + /// + /// Optional argument + /// Type of the table keys. + /// + /// + /// Optional argument + /// Type of the table values. + /// + /// + /// If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this table is shared under the given name across + /// multiple sessions. + /// + /// + /// If true and shared_name is empty, the table is shared + /// using the node name. + /// + /// + /// Handle to a table. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op creates a mutable hash table, specifying the type of its keys and + /// values. Each value must be a scalar. Data can be inserted into the table using + /// the insert operations. It does not support the initialization operation. + /// + public static Tensor mutable_hash_table_v2 (TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, string name = "MutableHashTableV2") + { + var dict = new Dictionary(); + dict["key_dtype"] = key_dtype; + dict["value_dtype"] = value_dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (use_node_name_sharing.HasValue) + dict["use_node_name_sharing"] = use_node_name_sharing.Value; + var op = _op_def_lib._apply_op_helper("MutableHashTableV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Locks a mutex resource. The output is the lock. So long as the lock tensor + /// + /// + /// The mutex resource to lock. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutexLock'. + /// + /// + /// A tensor that keeps a shared pointer to a lock on the mutex; + /// when the Tensor is destroyed, the use count on the shared pointer is decreased + /// by 1. 
When it reaches 0, the lock is released. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// is alive, any other request to use MutexLock with this mutex will wait. + /// + /// This is particularly useful for creating a critical section when used in + /// conjunction with MutexLockIdentity: + /// + /// + /// + /// mutex = mutex_v2( + /// shared_name=handle_name, container=container, name=name) + /// + /// def execute_in_critical_section(fn, *args, **kwargs): + /// lock = gen_resource_variable_ops.mutex_lock(mutex) + /// + /// with ops.control_dependencies([lock]): + /// r = fn(*args, **kwargs) + /// + /// with ops.control_dependencies(nest.flatten(r)): + /// with ops.colocate_with(mutex): + /// ensure_lock_exists = mutex_lock_identity(lock) + /// + /// # Make sure that if any element of r is accessed, all of + /// # them are executed together. + /// r = nest.map_structure(tf.identity, r) + /// + /// with ops.control_dependencies([ensure_lock_exists]): + /// return nest.map_structure(tf.identity, r) + /// + /// + /// While fn is running in the critical section, no other functions which wish to + /// use this critical section may run. + /// + /// Often the use case is that two executions of the same graph, in parallel, + /// wish to run fn; and we wish to ensure that only one of them executes + /// at a time. This is especially important if fn modifies one or more + /// variables at a time. + /// + /// It is also useful if two separate functions must share a resource, but we + /// wish to ensure the usage is exclusive. + /// + public static Tensor mutex_lock (Tensor mutex, string name = "MutexLock") + { + var dict = new Dictionary(); + dict["mutex"] = mutex; + var op = _op_def_lib._apply_op_helper("MutexLock", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a Mutex resource that can be locked by MutexLock. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'MutexV2'. + /// + /// + /// If non-empty, this variable is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this variable is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The mutex resource. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor mutex_v2 (string container = null, string shared_name = null, string name = "MutexV2") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("MutexV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes numerical negative value element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Neg'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = -x\\). + /// + public static Tensor neg (Tensor x, string name = "Neg") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Neg", name: name, keywords: dict); + return op.output; + } + + /// + /// Training via negative sampling. + /// + /// + /// input word embedding. 
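+ ///
+ /// A hedged sketch of a word2vec-style call (all tensors hypothetical;
+ /// w_in and w_out would be [vocab_size, dim] embedding variables):
+ ///
+ /// var train_op = gen_ops.neg_train(w_in, w_out, examples, labels, lr,
+ /// vocab_count, num_negative_samples: 25);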
+ /// + /// + /// output word embedding. + /// + /// + /// A vector of word ids. + /// + /// + /// A vector of word ids. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NegTrain'. + /// + /// + /// Optional argument + /// Count of words in the vocabulary. + /// + /// + /// Optional argument + /// Number of negative samples per example. + /// + /// + /// Returns the description of the operation + /// + public static Operation neg_train (Tensor w_in, Tensor w_out, Tensor examples, Tensor labels, Tensor lr, int[] vocab_count, int num_negative_samples, string name = "NegTrain") + { + var dict = new Dictionary(); + dict["w_in"] = w_in; + dict["w_out"] = w_out; + dict["examples"] = examples; + dict["labels"] = labels; + dict["lr"] = lr; + dict["vocab_count"] = vocab_count; + dict["num_negative_samples"] = num_negative_samples; + var op = _op_def_lib._apply_op_helper("NegTrain", name: name, keywords: dict); + return op; + } + + /// + /// Makes its input available to the next iteration. + /// + /// + /// The tensor to be made available to the next iteration. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NextIteration'. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor next_iteration (Tensor data, string name = "NextIteration") + { + var dict = new Dictionary(); + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("NextIteration", name: name, keywords: dict); + return op.output; + } + + /// + /// Does nothing. Only useful as a placeholder for control edges. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NoOp'. + /// + /// + /// Returns the description of the operation + /// + public static Operation no_op (string name = "NoOp") + { + var dict = new Dictionary(); + var op = _op_def_lib._apply_op_helper("NoOp", name: name, keywords: dict); + return op; + } + + /// + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// + /// A 2-D float tensor of shape [num_boxes, 4]. + /// + /// + /// A 1-D float tensor of shape [num_boxes] representing a single + /// score corresponding to each box (each row of boxes). + /// + /// + /// A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NonMaxSuppression'. + /// + /// + /// A float representing the threshold for deciding whether boxes + /// overlap too much with respect to IOU. + /// + /// + /// A 1-D integer tensor of shape [M] representing the selected + /// indices from the boxes tensor, where M <= max_output_size. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system. 
Note that this
+ /// algorithm is invariant to orthogonal transformations and translations
+ /// of the coordinate system; thus translations or reflections of the coordinate
+ /// system result in the same boxes being selected by the algorithm.
+ /// The output of this operation is a set of integers indexing into the input
+ /// collection of bounding boxes representing the selected boxes. The bounding
+ /// box coordinates corresponding to the selected indices can then be obtained
+ /// using the tf.gather operation. For example:
+ /// selected_indices = tf.image.non_max_suppression(
+ /// boxes, scores, max_output_size, iou_threshold)
+ /// selected_boxes = tf.gather(boxes, selected_indices)
+ ///
+ public static Tensor non_max_suppression (Tensor boxes, Tensor scores, Tensor max_output_size, float? iou_threshold = null, string name = "NonMaxSuppression")
+ {
+ var dict = new Dictionary();
+ dict["boxes"] = boxes;
+ dict["scores"] = scores;
+ dict["max_output_size"] = max_output_size;
+ if (iou_threshold.HasValue)
+ dict["iou_threshold"] = iou_threshold.Value;
+ var op = _op_def_lib._apply_op_helper("NonMaxSuppression", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Greedily selects a subset of bounding boxes in descending order of score,
+ ///
+ ///
+ /// A 2-D float tensor of shape [num_boxes, 4].
+ ///
+ ///
+ /// A 1-D float tensor of shape [num_boxes] representing a single
+ /// score corresponding to each box (each row of boxes).
+ ///
+ ///
+ /// A scalar integer tensor representing the maximum number of
+ /// boxes to be selected by non max suppression.
+ ///
+ ///
+ /// A 0-D float tensor representing the threshold for deciding whether
+ /// boxes overlap too much with respect to IOU.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NonMaxSuppressionV2'.
+ ///
+ ///
+ /// A 1-D integer tensor of shape [M] representing the selected
+ /// indices from the boxes tensor, where M <= max_output_size.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// pruning away boxes that have high intersection-over-union (IOU) overlap
+ /// with previously selected boxes. Bounding boxes are supplied as
+ /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+ /// diagonal pair of box corners and the coordinates can be provided as normalized
+ /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+ /// is agnostic to where the origin is in the coordinate system. Note that this
+ /// algorithm is invariant to orthogonal transformations and translations
+ /// of the coordinate system; thus translations or reflections of the coordinate
+ /// system result in the same boxes being selected by the algorithm.
+ ///
+ /// The output of this operation is a set of integers indexing into the input
+ /// collection of bounding boxes representing the selected boxes. The bounding
+ /// box coordinates corresponding to the selected indices can then be obtained
+ /// using the tf.gather operation. For example:
+ ///
+ /// selected_indices = tf.image.non_max_suppression_v2(
+ /// boxes, scores, max_output_size, iou_threshold)
+ /// selected_boxes = tf.gather(boxes, selected_indices)
+ ///
+ public static Tensor non_max_suppression_v2 (Tensor boxes, Tensor scores, Tensor max_output_size, Tensor iou_threshold, string name = "NonMaxSuppressionV2")
+ {
+ var dict = new Dictionary();
+ dict["boxes"] = boxes;
+ dict["scores"] = scores;
+ dict["max_output_size"] = max_output_size;
+ dict["iou_threshold"] = iou_threshold;
+ var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Greedily selects a subset of bounding boxes in descending order of score,
+ ///
+ ///
+ /// A 2-D float tensor of shape [num_boxes, 4].
+ ///
+ ///
+ /// A 1-D float tensor of shape [num_boxes] representing a single
+ /// score corresponding to each box (each row of boxes).
+ ///
+ ///
+ /// A scalar integer tensor representing the maximum number of
+ /// boxes to be selected by non max suppression.
+ ///
+ ///
+ /// A 0-D float tensor representing the threshold for deciding whether
+ /// boxes overlap too much with respect to IOU.
+ ///
+ ///
+ /// A 0-D float tensor representing the threshold for deciding when to remove
+ /// boxes based on score.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NonMaxSuppressionV3'.
+ ///
+ ///
+ /// A 1-D integer tensor of shape [M] representing the selected
+ /// indices from the boxes tensor, where M <= max_output_size.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// pruning away boxes that have high intersection-over-union (IOU) overlap
+ /// with previously selected boxes. Bounding boxes with score less than
+ /// score_threshold are removed. Bounding boxes are supplied as
+ /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+ /// diagonal pair of box corners and the coordinates can be provided as normalized
+ /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+ /// is agnostic to where the origin is in the coordinate system and more
+ /// generally is invariant to orthogonal transformations and translations
+ /// of the coordinate system; thus translations or reflections of the coordinate
+ /// system result in the same boxes being selected by the algorithm.
+ /// The output of this operation is a set of integers indexing into the input
+ /// collection of bounding boxes representing the selected boxes. The bounding
+ /// box coordinates corresponding to the selected indices can then be obtained
+ /// using the tf.gather operation. For example:
+ /// selected_indices = tf.image.non_max_suppression_v3(
+ /// boxes, scores, max_output_size, iou_threshold, score_threshold)
+ /// selected_boxes = tf.gather(boxes, selected_indices)
+ ///
+ public static Tensor non_max_suppression_v3 (Tensor boxes, Tensor scores, Tensor max_output_size, Tensor iou_threshold, Tensor score_threshold, string name = "NonMaxSuppressionV3")
+ {
+ var dict = new Dictionary();
+ dict["boxes"] = boxes;
+ dict["scores"] = scores;
+ dict["max_output_size"] = max_output_size;
+ dict["iou_threshold"] = iou_threshold;
+ dict["score_threshold"] = score_threshold;
+ var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV3", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Greedily selects a subset of bounding boxes in descending order of score,
+ ///
+ ///
+ /// A 2-D float tensor of shape [num_boxes, 4].
+ ///
+ ///
+ /// A 1-D float tensor of shape [num_boxes] representing a single
+ /// score corresponding to each box (each row of boxes).
+ ///
+ ///
+ /// A scalar integer tensor representing the maximum number of
+ /// boxes to be selected by non max suppression.
+ ///
+ ///
+ /// A 0-D float tensor representing the threshold for deciding whether
+ /// boxes overlap too much with respect to IOU.
+ ///
+ ///
+ /// A 0-D float tensor representing the threshold for deciding when to remove
+ /// boxes based on score.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NonMaxSuppressionV4'.
+ ///
+ ///
+ /// If true, the output selected_indices is padded to be of length
+ /// max_output_size. Defaults to false.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// selected_indices : A 1-D integer tensor of shape [M] representing the selected
+ /// indices from the boxes tensor, where M <= max_output_size.
+ /// valid_outputs : A 0-D integer tensor representing the number of valid elements in
+ /// selected_indices, with the valid elements appearing first.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// pruning away boxes that have high intersection-over-union (IOU) overlap
+ /// with previously selected boxes. Bounding boxes with score less than
+ /// score_threshold are removed. Bounding boxes are supplied as
+ /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+ /// diagonal pair of box corners and the coordinates can be provided as normalized
+ /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+ /// is agnostic to where the origin is in the coordinate system and more
+ /// generally is invariant to orthogonal transformations and translations
+ /// of the coordinate system; thus translations or reflections of the coordinate
+ /// system result in the same boxes being selected by the algorithm.
+ /// The output of this operation is a set of integers indexing into the input
+ /// collection of bounding boxes representing the selected boxes. The bounding
+ /// box coordinates corresponding to the selected indices can then be obtained
+ /// using the tf.gather operation. For example:
For example: + /// selected_indices = tf.image.non_max_suppression( + /// boxes, scores, max_output_size, iou_threshold, score_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + public static (Tensor selected_indices, Tensor valid_outputs) non_max_suppression_v4 (Tensor boxes, Tensor scores, Tensor max_output_size, Tensor iou_threshold, Tensor score_threshold, bool? pad_to_max_output_size = null, string name = "NonMaxSuppressionV4") + { + var dict = new Dictionary(); + dict["boxes"] = boxes; + dict["scores"] = scores; + dict["max_output_size"] = max_output_size; + dict["iou_threshold"] = iou_threshold; + dict["score_threshold"] = score_threshold; + if (pad_to_max_output_size.HasValue) + dict["pad_to_max_output_size"] = pad_to_max_output_size.Value; + var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV4", name: name, keywords: dict); + int _idx = 0; + var selected_indices = op.outputs[_idx++]; + var valid_outputs = op.outputs[_idx++]; + return (selected_indices, valid_outputs); + } + + /// + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// + /// A 2-D float tensor of shape [num_boxes, num_boxes] representing + /// the n-by-n box overlap values. + /// + /// + /// A 1-D float tensor of shape [num_boxes] representing a single + /// score corresponding to each box (each row of boxes). + /// + /// + /// A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// + /// + /// A 0-D float tensor representing the threshold for deciding whether + /// boxes overlap too much. + /// + /// + /// A 0-D float tensor representing the threshold for deciding when to remove + /// boxes based on score. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NonMaxSuppressionWithOverlaps'. + /// + /// + /// A 1-D integer tensor of shape [M] representing the selected + /// indices from the boxes tensor, where M <= max_output_size. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// pruning away boxes that have high overlaps + /// with previously selected boxes. Bounding boxes with score less than + /// score_threshold are removed. N-by-n overlap values are supplied as a square matrix, + /// which allows for defining a custom overlap criterion (e.g., intersection over union, + /// intersection over area, etc.). + /// + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the tf.gather operation.
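+ /// + /// A hedged C# sketch against this binding (it assumes an overlaps tensor of shape [num_boxes, num_boxes], e.g. pairwise IOU values, has already been computed upstream): + /// + /// var selected_indices = gen_ops.non_max_suppression_with_overlaps( + /// overlaps, scores, max_output_size, overlap_threshold, score_threshold); + ///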
For example: + /// + /// selected_indices = tf.image.non_max_suppression_with_overlaps( + /// overlaps, scores, max_output_size, overlap_threshold, score_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + public static Tensor non_max_suppression_with_overlaps (Tensor overlaps, Tensor scores, Tensor max_output_size, Tensor overlap_threshold, Tensor score_threshold, string name = "NonMaxSuppressionWithOverlaps") + { + var dict = new Dictionary(); + dict["overlaps"] = overlaps; + dict["scores"] = scores; + dict["max_output_size"] = max_output_size; + dict["overlap_threshold"] = overlap_threshold; + dict["score_threshold"] = score_threshold; + var op = _op_def_lib._apply_op_helper("NonMaxSuppressionWithOverlaps", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the truth value of (x != y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NotEqual'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: NotEqual supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor not_equal (Tensor x, Tensor y, string name = "NotEqual") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("NotEqual", name: name, keywords: dict); + return op.output; + } + + /// + /// Finds values of the n-th order statistic for the last dimension. + /// + /// + /// 1-D or higher with last dimension at least n+1. + /// + /// + /// 0-D. Position of sorted vector to select along the last dimension (along + /// each row for matrices). Valid range of n is [0, input.shape[:-1]) + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'NthElement'. + /// + /// + /// When set to True, find the nth-largest value in the vector and vice + /// versa. + /// + /// + /// The n-th order statistic along each last dimensional slice. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If the input is a vector (rank-1), finds the entries which is the nth-smallest + /// value in the vector and outputs their values as scalar tensor. + /// + /// For matrices (resp. higher rank input), computes the entries which is the + /// nth-smallest value in each row (resp. vector along the last dimension). Thus, + /// + /// values.shape = input.shape[:-1] + /// + public static Tensor nth_element (Tensor input, Tensor n, bool? reverse = null, string name = "NthElement") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["n"] = n; + if (reverse.HasValue) + dict["reverse"] = reverse.Value; + var op = _op_def_lib._apply_op_helper("NthElement", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a one-hot tensor. + /// + /// + /// A tensor of indices. + /// + /// + /// A scalar defining the depth of the one hot dimension. + /// + /// + /// A scalar defining the value to fill in output when indices[j] = i. + /// + /// + /// A scalar defining the value to fill in output when indices[j] != i. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OneHot'. + /// + /// + /// The axis to fill (default: -1, a new inner-most axis). 
+ /// + /// + /// The one-hot tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The locations represented by indices in indices take value on_value, + /// while all other locations take value off_value. + /// + /// If the input indices is rank N, the output will have rank N+1. + /// The new axis is created at dimension axis (default: the new axis is + /// appended at the end). + /// + /// If indices is a scalar the output shape will be a vector of length depth. + /// + /// If indices is a vector of length features, the output shape will be: + /// + /// features x depth if axis == -1 + /// depth x features if axis == 0 + /// + /// + /// If indices is a matrix (batch) with shape [batch, features], + /// the output shape will be: + /// + /// batch x features x depth if axis == -1 + /// batch x depth x features if axis == 1 + /// depth x batch x features if axis == 0 + /// + /// + /// + /// Examples + /// ========= + /// + /// Suppose that + /// + /// + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 5.0 + /// off_value = 0.0 + /// axis = -1 + /// + /// + /// Then output is [4 x 3]: + /// + /// + /// output = + /// [5.0 0.0 0.0] // one_hot(0) + /// [0.0 0.0 5.0] // one_hot(2) + /// [0.0 0.0 0.0] // one_hot(-1) + /// [0.0 5.0 0.0] // one_hot(1) + /// + /// + /// Suppose that + /// + /// + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 0.0 + /// off_value = 3.0 + /// axis = 0 + /// + /// + /// Then output is [3 x 4]: + /// + /// + /// output = + /// [0.0 3.0 3.0 3.0] + /// [3.0 3.0 3.0 0.0] + /// [3.0 0.0 3.0 3.0] + /// // ^ one_hot(0) + /// // ^ one_hot(2) + /// // ^ one_hot(-1) + /// // ^ one_hot(1) + /// + /// Suppose that + /// + /// + /// indices = [[0, 2], [1, -1]] + /// depth = 3 + /// on_value = 1.0 + /// off_value = 0.0 + /// axis = -1 + /// + /// + /// Then output is [2 x 2 x 3]: + /// + /// + /// output = + /// [ + /// [1.0, 0.0, 0.0] // one_hot(0) + /// [0.0, 0.0, 1.0] // one_hot(2) + /// ][ + /// [0.0, 1.0, 0.0] // one_hot(1) + /// [0.0, 0.0, 0.0] // one_hot(-1) + /// ] + /// + public static Tensor one_hot (Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int? axis = null, string name = "OneHot") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["depth"] = depth; + dict["on_value"] = on_value; + dict["off_value"] = off_value; + if (axis.HasValue) + dict["axis"] = axis.Value; + var op = _op_def_lib._apply_op_helper("OneHot", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a tensor of ones with the same shape and type as x. + /// + /// + /// a tensor of type T. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OnesLike'. + /// + /// + /// a tensor of the same shape and type as x but filled with ones. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ones_like (Tensor x, string name = "OnesLike") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("OnesLike", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset by applying optimizations to input_dataset. + /// + /// + /// A variant tensor representing the input dataset. + /// + /// + /// A tf.string vector tf.Tensor identifying optimizations to use.
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OptimizeDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Creates a dataset by applying optimizations to input_dataset. + /// + public static Tensor optimize_dataset (Tensor input_dataset, Tensor optimizations, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "OptimizeDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["optimizations"] = optimizations; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("OptimizeDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Constructs an Optional variant from a tuple of tensors. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OptionalFromValue'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor optional_from_value (Tensor[] components, string name = "OptionalFromValue") + { + var dict = new Dictionary(); + dict["components"] = components; + var op = _op_def_lib._apply_op_helper("OptionalFromValue", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the value stored in an Optional variant or raises an error if none exists. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OptionalGetValue'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] optional_get_value (Tensor optional, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "OptionalGetValue") + { + var dict = new Dictionary(); + dict["optional"] = optional; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("OptionalGetValue", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Returns true if and only if the given Optional variant has a value. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OptionalHasValue'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor optional_has_value (Tensor optional, string name = "OptionalHasValue") + { + var dict = new Dictionary(); + dict["optional"] = optional; + var op = _op_def_lib._apply_op_helper("OptionalHasValue", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates an Optional variant with no value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OptionalNone'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
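+ /// + /// A hedged sketch tying the Optional bindings above together (the value tensor, its dtype and the scalar shape are assumptions; the has_value result would still have to be evaluated in a session before branching on it): + /// + /// var opt = gen_ops.optional_from_value(new[] { value }); + /// var has_value = gen_ops.optional_has_value(opt); + /// var components = gen_ops.optional_get_value( + /// opt, new[] { value.dtype }, new[] { new TensorShape() }); + /// var none = gen_ops.optional_none();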
+ /// + public static Tensor optional_none (string name = "OptionalNone") + { + var dict = new Dictionary(); + var op = _op_def_lib._apply_op_helper("OptionalNone", name: name, keywords: dict); + return op.output; + } + + /// + /// Op removes all elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapClear'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns the description of the operation + /// + public static Operation ordered_map_clear (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapClear") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapClear", name: name, keywords: dict); + return op; + } + + /// + /// Op returns the number of incomplete elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapIncompleteSize'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ordered_map_incomplete_size (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapIncompleteSize") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapIncompleteSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Op peeks at the values at the specified key. If the + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapPeek'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// underlying container does not contain this key + /// this op will block until it does. This Op is optimized for + /// performance. + /// + public static Tensor[] ordered_map_peek (Tensor key, Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? 
memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapPeek") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapPeek", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Op returns the number of elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapSize'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ordered_map_size (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapSize") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Stage (key, values) in the underlying container which behaves like a ordered + /// + /// + /// int64 + /// + /// + /// + /// + /// a list of tensors + /// dtypes A list of data types that inserted values should adhere to. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapStage'. + /// + /// + /// Optional argument + /// + /// + /// Maximum number of elements in the Staging Area. If > 0, inserts + /// on the container will block when the capacity is reached. + /// + /// + /// + /// + /// If non-empty, this queue is placed in the given container. Otherwise, + /// a default container is used. + /// + /// + /// It is necessary to match this name to the matching Unstage Op. + /// + /// + /// Returns the description of the operation + /// + /// + /// associative container. Elements are ordered by key. + /// + public static Operation ordered_map_stage (Tensor key, Tensor indices, Tensor[] values, TF_DataType[] dtypes, int? capacity = null, int? 
memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapStage") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["values"] = values; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapStage", name: name, keywords: dict); + return op; + } + + /// + /// Op removes and returns the values associated with the key + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapUnstage'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// from the underlying container. If the underlying container + /// does not contain this key, the op will block until it does. + /// + public static Tensor[] ordered_map_unstage (Tensor key, Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapUnstage") + { + var dict = new Dictionary(); + dict["key"] = key; + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapUnstage", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Op removes and returns the (key, value) element with the smallest + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OrderedMapUnstageNoKey'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// key : + /// values : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// key from the underlying container. If the underlying container + /// does not contain elements, the op will block until it does. + /// + public static (Tensor key, Tensor[] values) ordered_map_unstage_no_key (Tensor indices, TF_DataType[] dtypes, int? capacity = null, int? 
memory_limit = null, string container = null, string shared_name = null, string name = "OrderedMapUnstageNoKey") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("OrderedMapUnstageNoKey", name: name, keywords: dict); + int _idx = 0; + var key = op.outputs[_idx++]; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (key, values); + } + + /// + /// Retrieves a single tensor from the computation outfeed. This operation will + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OutfeedDequeue'. + /// + /// + /// Optional argument + /// The type of elements in the tensor. + /// + /// + /// Optional argument + /// The shape of the tensor. + /// + /// + /// The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// + /// A tensor that will be read from the device outfeed. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// block indefinitely until data is available. + /// + public static Tensor outfeed_dequeue (TF_DataType dtype, TensorShape shape, int? device_ordinal = null, string name = "OutfeedDequeue") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + if (device_ordinal.HasValue) + dict["device_ordinal"] = device_ordinal.Value; + var op = _op_def_lib._apply_op_helper("OutfeedDequeue", name: name, keywords: dict); + return op.output; + } + + /// + /// Retrieve multiple values that will be emitted by the computation as an XLA + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OutfeedDequeueTuple'. + /// + /// + /// Optional argument + /// The element types of each element in outputs. + /// + /// + /// Optional argument + /// The shapes of each tensor in outputs. + /// + /// + /// The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// + /// A list of tensors that will be read from the outfeed. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// tuple. This operations will block indefinitely until data is available. + /// Output i corresponds to XLA tuple element i. + /// + public static Tensor[] outfeed_dequeue_tuple (TF_DataType[] dtypes, TensorShape[] shapes, int? device_ordinal = null, string name = "OutfeedDequeueTuple") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + dict["shapes"] = shapes; + if (device_ordinal.HasValue) + dict["device_ordinal"] = device_ordinal.Value; + var op = _op_def_lib._apply_op_helper("OutfeedDequeueTuple", name: name, keywords: dict); + int _idx = 0; + var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray(); + return (outputs); + } + + /// + /// An op which emits a single Tensor value from an XLA computation. + /// + /// + /// A tensor that will be inserted into the outfeed queue. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OutfeedEnqueue'. + /// + /// + /// Returns the description of the operation + /// + public static Operation outfeed_enqueue (Tensor input, string name = "OutfeedEnqueue") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("OutfeedEnqueue", name: name, keywords: dict); + return op; + } + + /// + /// An op which emits multiple Tensor values from an XLA computation. + /// + /// + /// A list of tensors that will be inserted into the outfeed queue as an + /// XLA tuple. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'OutfeedEnqueueTuple'. + /// + /// + /// Returns the description of the operation + /// + public static Operation outfeed_enqueue_tuple (Tensor[] inputs, string name = "OutfeedEnqueueTuple") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + var op = _op_def_lib._apply_op_helper("OutfeedEnqueueTuple", name: name, keywords: dict); + return op; + } + + /// + /// Packs a list of N rank-R tensors into one rank-(R+1) tensor. + /// + /// + /// Must be of same shape and type. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Pack'. + /// + /// + /// Dimension along which to pack. Negative values wrap around, so the + /// valid range is [-(R+1), R+1). + /// + /// + /// The packed tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Packs the N tensors in values into a tensor with rank one higher than each + /// tensor in values, by packing them along the axis dimension. + /// Given a list of tensors of shape (A, B, C); + /// + /// if axis == 0 then the output tensor will have the shape (N, A, B, C). + /// if axis == 1 then the output tensor will have the shape (A, N, B, C). + /// Etc. + /// + /// For example: + /// + /// + /// # 'x' is [1, 4] + /// # 'y' is [2, 5] + /// # 'z' is [3, 6] + /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + /// + /// + /// This is the opposite of unpack. + /// + public static Tensor pack (Tensor[] values, int? axis = null, string name = "Pack") + { + var dict = new Dictionary(); + dict["values"] = values; + if (axis.HasValue) + dict["axis"] = axis.Value; + var op = _op_def_lib._apply_op_helper("Pack", name: name, keywords: dict); + return op.output; + } + + /// + /// Pads a tensor with zeros. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Pad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation pads a input with zeros according to the paddings you + /// specify. paddings is an integer tensor with shape [Dn, 2], where n is the + /// rank of input. For each dimension D of input, paddings[D, 0] indicates + /// how many zeros to add before the contents of input in that dimension, and + /// paddings[D, 1] indicates how many zeros to add after the contents of input + /// in that dimension. 
+ /// + /// The padded size of each dimension D of the output is: + /// + /// paddings(D, 0) + input.dim_size(D) + paddings(D, 1) + /// + /// For example: + /// + /// + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// + /// + /// + public static Tensor pad (Tensor input, Tensor paddings, string name = "Pad") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["paddings"] = paddings; + var op = _op_def_lib._apply_op_helper("Pad", name: name, keywords: dict); + return op.output; + } + + /// + /// Pads a tensor. + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PadV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation pads input according to the paddings and constant_values + /// you specify. paddings is an integer tensor with shape [Dn, 2], where n is + /// the rank of input. For each dimension D of input, paddings[D, 0] indicates + /// how many padding values to add before the contents of input in that dimension, + /// and paddings[D, 1] indicates how many padding values to add after the contents + /// of input in that dimension. constant_values is a scalar tensor of the same + /// type as input that indicates the value to use for padding input. + /// + /// The padded size of each dimension D of the output is: + /// + /// paddings(D, 0) + input.dim_size(D) + paddings(D, 1) + /// + /// For example: + /// + /// + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # 'constant_values' is 0 + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// + /// + public static Tensor pad_v2 (Tensor input, Tensor paddings, Tensor constant_values, string name = "PadV2") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["paddings"] = paddings; + dict["constant_values"] = constant_values; + var op = _op_def_lib._apply_op_helper("PadV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that batches and pads batch_size elements from the input. + /// + /// + /// + /// + /// A scalar representing the number of elements to accumulate in a + /// batch. + /// + /// + /// A list of int64 tensors representing the desired padded shapes + /// of the corresponding output components. These shapes may be partially + /// specified, using -1 to indicate that a particular dimension should be + /// padded to the maximum size of all batch elements. + /// + /// + /// A list of scalars containing the padding value to use for + /// each of the outputs. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PaddedBatchDataset'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
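+ /// + /// (Illustrative note, not part of the op definition: a padded_shapes entry such as [-1] pads a 1-D component of every element out to the longest such component in the batch, filling with the matching padding_values scalar.)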
+ /// + public static Tensor padded_batch_dataset (Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, TensorShape[] output_shapes, string name = "PaddedBatchDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["batch_size"] = batch_size; + dict["padded_shapes"] = padded_shapes; + dict["padding_values"] = padding_values; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("PaddedBatchDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that batches and pads batch_size elements from the input. + /// + /// + /// + /// + /// A scalar representing the number of elements to accumulate in a + /// batch. + /// + /// + /// A list of int64 tensors representing the desired padded shapes + /// of the corresponding output components. These shapes may be partially + /// specified, using -1 to indicate that a particular dimension should be + /// padded to the maximum size of all batch elements. + /// + /// + /// A list of scalars containing the padding value to use for + /// each of the outputs. + /// + /// + /// A scalar representing whether the last batch should be dropped in case its size + /// is smaller than desired. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PaddedBatchDatasetV2'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor padded_batch_dataset_v2 (Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, Tensor drop_remainder, TensorShape[] output_shapes, string name = "PaddedBatchDatasetV2") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["batch_size"] = batch_size; + dict["padded_shapes"] = padded_shapes; + dict["padding_values"] = padding_values; + dict["drop_remainder"] = drop_remainder; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("PaddedBatchDatasetV2", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements in first-in first-out order. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PaddingFIFOQueue'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. + /// Shapes of fixed rank but variable size are allowed by setting + /// any shape dimension to -1. In this case, the inputs' shape may vary along + /// the given dimension, and DequeueMany will pad the given dimension with + /// zeros up to the maximum shape of all elements in the given batch. + /// If the length of this attr is 0, different queue elements may have + /// different ranks and shapes, but only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Variable-size shapes are allowed by setting the corresponding shape dimensions + /// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum + /// size of any given element in the minibatch. See below for details. + /// + public static Tensor padding_f_i_f_o_queue (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueue") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("PaddingFIFOQueue", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements in first-in first-out order. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PaddingFIFOQueueV2'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. + /// Shapes of fixed rank but variable size are allowed by setting + /// any shape dimension to -1. In this case, the inputs' shape may vary along + /// the given dimension, and DequeueMany will pad the given dimension with + /// zeros up to the maximum shape of all elements in the given batch. + /// If the length of this attr is 0, different queue elements may have + /// different ranks and shapes, but only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Variable-size shapes are allowed by setting the corresponding shape dimensions + /// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum + /// size of any given element in the minibatch. See below for details. + /// + public static Tensor padding_f_i_f_o_queue_v2 (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueueV2") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Concatenates a list of N tensors along the first dimension. + /// + /// + /// Tensors to be concatenated. All must have size 1 in the first dimension + /// and same shape. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParallelConcat'. + /// + /// + /// Optional argument + /// the final shape of the result; should be equal to the shapes of any input + /// but with the number of input values in the first dimension. + /// + /// + /// The concatenated tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input tensors are all required to have size 1 in the first dimension. + /// + /// For example: + /// + /// + /// # 'x' is [[1, 4]] + /// # 'y' is [[2, 5]] + /// # 'z' is [[3, 6]] + /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// + /// + /// The difference between concat and parallel_concat is that concat requires all + /// of the inputs be computed before the operation will begin but doesn't require + /// that the input shapes be known during graph construction. Parallel concat + /// will copy pieces of the input into the output as they become available, in + /// some situations this can provide a performance benefit. + /// + public static Tensor parallel_concat (Tensor[] values, TensorShape shape, string name = "ParallelConcat") + { + var dict = new Dictionary(); + dict["values"] = values; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("ParallelConcat", name: name, keywords: dict); + return op.output; + } + + /// + /// Interleave the values from the data tensors into a single tensor. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParallelDynamicStitch'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Builds a merged tensor such that + /// + /// + /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + /// + /// + /// For example, if each indices[m] is scalar or vector, we have + /// + /// + /// # Scalar indices: + /// merged[indices[m], ...] = data[m][...] + /// + /// # Vector indices: + /// merged[indices[m][i], ...] = data[m][i, ...] + /// + /// + /// Each data[i].shape must start with the corresponding indices[i].shape, + /// and the rest of data[i].shape must be constant w.r.t. i. That is, we + /// must have data[i].shape = indices[i].shape + constant. In terms of this + /// constant, the output shape is + /// + /// merged.shape = [max(indices)] + constant + /// + /// Values may be merged in parallel, so if an index appears in both indices[m][i] + /// and indices[n][j], the result may be invalid. This differs from the normal + /// DynamicStitch operator that defines the behavior in that case. + /// + /// For example: + /// + /// + /// indices[0] = 6 + /// indices[1] = [4, 1] + /// indices[2] = [[5, 2], [0, 3]] + /// data[0] = [61, 62] + /// data[1] = [[41, 42], [11, 12]] + /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + /// [51, 52], [61, 62]] + /// + /// + /// This method can be used to merge partitions created by dynamic_partition + /// as illustrated on the following example: + /// + /// + /// # Apply function (increments x_i) on elements for which a certain condition + /// # apply (x_i != -1 in this example). 
+ /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + /// condition_mask=tf.not_equal(x,tf.constant(-1.)) + /// partitioned_data = tf.dynamic_partition( + /// x, tf.cast(condition_mask, tf.int32) , 2) + /// partitioned_data[1] = partitioned_data[1] + 1.0 + /// condition_indices = tf.dynamic_partition( + /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + /// x = tf.dynamic_stitch(condition_indices, partitioned_data) + /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + /// # unchanged. + /// + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt> + /// </div> + /// + public static Tensor parallel_dynamic_stitch (Tensor[] indices, Tensor[] data, string name = "ParallelDynamicStitch") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("ParallelDynamicStitch", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random values from a normal distribution. The parameters may each be a + /// + /// + /// The shape of the output tensor. Batches are indexed by the 0th dimension. + /// + /// + /// The mean parameter of each batch. + /// + /// + /// The standard deviation parameter of each batch. Must be greater than 0. + /// + /// + /// The minimum cutoff. May be -infinity. + /// + /// + /// The maximum cutoff. May be +infinity, and must be more than the minval + /// for each batch. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParameterizedTruncatedNormal'. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A matrix of shape num_batches x samples_per_batch, filled with random + /// truncated normal values using the parameters for each row. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// scalar which applies to the entire output, or a vector of length shape[0] which + /// stores the parameters for each batch. + /// + public static Tensor parameterized_truncated_normal (Tensor shape, Tensor means, Tensor stdevs, Tensor minvals, Tensor maxvals, int? seed = null, int? seed2 = null, string name = "ParameterizedTruncatedNormal") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["means"] = means; + dict["stdevs"] = stdevs; + dict["minvals"] = minvals; + dict["maxvals"] = maxvals; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("ParameterizedTruncatedNormal", name: name, keywords: dict); + return op.output; + } + + /// + /// Transforms a vector of brain.Example protos (as strings) into typed tensors. + /// + /// + /// A vector containing a batch of binary serialized Example protos. + /// + /// + /// A vector containing the names of the serialized protos. + /// May contain, for example, table key (descriptive) names for the + /// corresponding serialized protos. These are purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty vector if no names are available. 
+ /// If non-empty, this vector must be the same length as "serialized". + /// + /// + /// A list of Nsparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with sparse values. + /// + /// + /// A list of Ndense string Tensors (scalars). + /// The keys expected in the Examples' features associated with dense values. + /// + /// + /// A list of Ndense Tensors (some may be empty). + /// dense_defaults[j] provides default values + /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is + /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. + /// The input type is inferred from dense_defaults[j], even when it's empty. + /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. + /// If dense_shapes[j] has an undefined major dimension (variable strides dense + /// feature), dense_defaults[j] must contain a single element: + /// the padding element. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseExample'. + /// + /// + /// Optional argument + /// A list of Nsparse types; the data types of data in each Feature + /// given in sparse_keys. + /// Currently the ParseExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// Optional argument + /// A list of Ndense shapes; the shapes of data in each Feature + /// given in dense_keys. + /// The number of elements in the Feature corresponding to dense_key[j] + /// must always equal dense_shapes[j].NumEntries(). + /// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + /// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + /// The dense outputs are just the inputs row-stacked by batch. + /// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + /// the shape of the output Tensor dense_values[j] will be + /// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + /// of elements of length D1 * .... * DN, across all minibatch entries + /// in the input. Any minibatch entry with less than M blocks of elements of + /// length D1 * ... * DN will be padded with the corresponding default_value + /// scalar element along the second dimension. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sparse_indices : + /// sparse_values : + /// sparse_shapes : + /// dense_values : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. 
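+ /// + /// A hedged C# sketch of a one-sparse/one-dense call (every tensor name here is an assumption; serialized would typically come from a TFRecord reader): + /// + /// var (sparse_idx, sparse_vals, sparse_shapes, dense_vals) = gen_ops.parse_example( + /// serialized, names, + /// sparse_keys: new[] { sparse_key }, // scalar string tensors + /// dense_keys: new[] { dense_key }, + /// dense_defaults: new[] { dense_default }, + /// sparse_types: new[] { TF_DataType.TF_INT64 }, + /// dense_shapes: new[] { new TensorShape(1) });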
+ /// + public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_example (Tensor serialized, Tensor names, Tensor[] sparse_keys, Tensor[] dense_keys, Tensor[] dense_defaults, TF_DataType[] sparse_types, TensorShape[] dense_shapes, string name = "ParseExample") + { + var dict = new Dictionary(); + dict["serialized"] = serialized; + dict["names"] = names; + dict["sparse_keys"] = sparse_keys; + dict["dense_keys"] = dense_keys; + dict["dense_defaults"] = dense_defaults; + dict["sparse_types"] = sparse_types; + dict["dense_shapes"] = dense_shapes; + var op = _op_def_lib._apply_op_helper("ParseExample", name: name, keywords: dict); + int _idx = 0; + var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var sparse_shapes = Enumerable.Range(0, op.OutputListLength("sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var dense_values = Enumerable.Range(0, op.OutputListLength("dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (sparse_indices, sparse_values, sparse_shapes, dense_values); + } + + /// + /// Transforms input_dataset containing Example protos as vectors of DT_STRING into a dataset of Tensor or SparseTensor objects representing the parsed features. + /// + /// + /// + /// + /// + /// + /// A dict mapping string keys to Tensors. + /// The keys of the dict must match the dense_keys of the feature. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseExampleDataset'. + /// + /// + /// Optional argument + /// A list of string keys in the examples features. + /// The results for these keys will be returned as SparseTensor objects. + /// + /// + /// Optional argument + /// A list of Ndense string Tensors (scalars). + /// The keys expected in the Examples features associated with dense values. + /// + /// + /// Optional argument + /// A list of DTypes of the same length as sparse_keys. + /// Only tf.float32 (FloatList), tf.int64 (Int64List), + /// and tf.string (BytesList) are supported. + /// + /// + /// Optional argument + /// List of tuples with the same length as dense_keys. + /// The shape of the data for each dense feature referenced by dense_keys. + /// Required for any input tensors identified by dense_keys. Must be + /// either fully defined, or may contain an unknown first dimension. + /// An unknown first dimension means the feature is treated as having + /// a variable number of blocks, and the output shape along this dimension + /// is considered unknown at graph build time. Padding is applied for + /// minibatch elements smaller than the maximum number of blocks for the + /// given feature along this dimension. + /// + /// + /// Optional argument + /// The type list for the return values. + /// + /// + /// Optional argument + /// The list of shapes being produced. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
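+ /// + /// (Illustrative note, not part of the op definition: output_types and output_shapes declare the element structure of the dataset this op produces, so they must agree with the sparse and dense features requested above.)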
+ /// + public static Tensor parse_example_dataset (Tensor input_dataset, Tensor num_parallel_calls, Tensor[] dense_defaults, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, TensorShape[] dense_shapes, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ParseExampleDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["num_parallel_calls"] = num_parallel_calls; + dict["dense_defaults"] = dense_defaults; + dict["sparse_keys"] = sparse_keys; + dict["dense_keys"] = dense_keys; + dict["sparse_types"] = sparse_types; + dict["dense_shapes"] = dense_shapes; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("ParseExampleDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors. + /// + /// + /// A vector containing binary serialized SequenceExample protos. + /// + /// + /// A vector containing the names of the serialized protos. + /// May contain, for example, table key (descriptive) name for the + /// corresponding serialized proto. This is purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty vector if no name is available. + /// + /// + /// A list of Ncontext_dense Tensors (some may be empty). + /// context_dense_defaults[j] provides default values + /// when the SequenceExample's context map lacks context_dense_key[j]. + /// If an empty Tensor is provided for context_dense_defaults[j], + /// then the Feature context_dense_keys[j] is required. + /// The input type is inferred from context_dense_defaults[j], even when it's + /// empty. If context_dense_defaults[j] is not empty, its shape must match + /// context_dense_shapes[j]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseSequenceExample'. + /// + /// + /// Optional argument + /// A vector listing the + /// FeatureList keys which may be missing from the SequenceExamples. If the + /// associated FeatureList is missing, it is treated as empty. By default, + /// any FeatureList not listed in this vector must exist in the SequenceExamples. + /// + /// + /// Optional argument + /// A list of Ncontext_sparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with context_sparse + /// values. + /// + /// + /// Optional argument + /// A list of Ncontext_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' context features associated with + /// dense values. + /// + /// + /// Optional argument + /// A list of Nfeature_list_sparse string Tensors + /// (scalars). The keys expected in the FeatureLists associated with sparse + /// values. + /// + /// + /// Optional argument + /// A list of Nfeature_list_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' feature_lists associated + /// with lists of dense values. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A list of Ncontext_sparse types; the data types of data in + /// each context Feature given in context_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// + /// + /// A list of Ncontext_dense shapes; the shapes of data in + /// each context Feature given in context_dense_keys. 
+ /// The number of elements in the Feature corresponding to context_dense_key[j] + /// must always equal context_dense_shapes[j].NumEntries(). + /// The shape of context_dense_values[j] will match context_dense_shapes[j]. + /// + /// + /// A list of Nfeature_list_sparse types; the data types + /// of data in each FeatureList given in feature_list_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// A list of Nfeature_list_dense shapes; the shapes of + /// data in each FeatureList given in feature_list_dense_keys. + /// The shape of each Feature in the FeatureList corresponding to + /// feature_list_dense_key[j] must always equal + /// feature_list_dense_shapes[j].NumEntries(). + /// + /// + /// Returns a tuple with multiple values, as follows: + /// context_sparse_indices : + /// context_sparse_values : + /// context_sparse_shapes : + /// context_dense_values : + /// feature_list_sparse_indices : + /// feature_list_sparse_values : + /// feature_list_sparse_shapes : + /// feature_list_dense_values : + /// feature_list_dense_lengths : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values, Tensor[] feature_list_dense_lengths) parse_sequence_example (Tensor serialized, Tensor debug_name, Tensor[] context_dense_defaults, string[] feature_list_dense_missing_assumed_empty, string[] context_sparse_keys, string[] context_dense_keys, string[] feature_list_sparse_keys, string[] feature_list_dense_keys, int? Ncontext_sparse = null, int? Ncontext_dense = null, int? Nfeature_list_sparse = null, int? 
Nfeature_list_dense = null, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, TensorShape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, TensorShape[] feature_list_dense_shapes = null, string name = "ParseSequenceExample") + { + var dict = new Dictionary(); + dict["serialized"] = serialized; + dict["debug_name"] = debug_name; + dict["context_dense_defaults"] = context_dense_defaults; + dict["feature_list_dense_missing_assumed_empty"] = feature_list_dense_missing_assumed_empty; + dict["context_sparse_keys"] = context_sparse_keys; + dict["context_dense_keys"] = context_dense_keys; + dict["feature_list_sparse_keys"] = feature_list_sparse_keys; + dict["feature_list_dense_keys"] = feature_list_dense_keys; + if (Ncontext_sparse.HasValue) + dict["Ncontext_sparse"] = Ncontext_sparse.Value; + if (Ncontext_dense.HasValue) + dict["Ncontext_dense"] = Ncontext_dense.Value; + if (Nfeature_list_sparse.HasValue) + dict["Nfeature_list_sparse"] = Nfeature_list_sparse.Value; + if (Nfeature_list_dense.HasValue) + dict["Nfeature_list_dense"] = Nfeature_list_dense.Value; + if (context_sparse_types != null) + dict["context_sparse_types"] = context_sparse_types; + if (feature_list_dense_types != null) + dict["feature_list_dense_types"] = feature_list_dense_types; + if (context_dense_shapes != null) + dict["context_dense_shapes"] = context_dense_shapes; + if (feature_list_sparse_types != null) + dict["feature_list_sparse_types"] = feature_list_sparse_types; + if (feature_list_dense_shapes != null) + dict["feature_list_dense_shapes"] = feature_list_dense_shapes; + var op = _op_def_lib._apply_op_helper("ParseSequenceExample", name: name, keywords: dict); + int _idx = 0; + var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_sparse_shapes = Enumerable.Range(0, op.OutputListLength("context_sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_dense_values = Enumerable.Range(0, op.OutputListLength("context_dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_indices = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_values = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_shapes = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_dense_values = Enumerable.Range(0, op.OutputListLength("feature_list_dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_dense_lengths = Enumerable.Range(0, op.OutputListLength("feature_list_dense_lengths")).Select(_ => op.outputs[_idx++]).ToArray(); + return (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths); + } + + /// + /// Transforms a tf.Example proto (as a string) into typed tensors. + /// + /// + /// A vector containing a batch of binary serialized Example protos. 
+ /// + /// + /// A list of Tensors (some may be empty), whose length matches + /// the length of dense_keys. dense_defaults[j] provides default values + /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is + /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. + /// The input type is inferred from dense_defaults[j], even when it's empty. + /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. + /// If dense_shapes[j] has an undefined major dimension (variable strides dense + /// feature), dense_defaults[j] must contain a single element: + /// the padding element. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseSingleExample'. + /// + /// + /// Optional argument + /// The number of sparse features to be parsed from the example. This + /// must match the lengths of sparse_keys and sparse_types. + /// + /// + /// Optional argument + /// A list of num_sparse strings. + /// The keys expected in the Examples' features associated with sparse values. + /// + /// + /// Optional argument + /// The keys expected in the Examples' features associated with dense + /// values. + /// + /// + /// Optional argument + /// A list of num_sparse types; the data types of data in each + /// Feature given in sparse_keys. + /// Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// Optional argument + /// The shapes of data in each Feature given in dense_keys. + /// The length of this list must match the length of dense_keys. The + /// number of elements in the Feature corresponding to dense_key[j] must + /// always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == + /// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + /// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + /// ..., DN), the shape of the output Tensor dense_values[j] will be (M, + /// D1, ..., DN), where M is the number of blocks of elements of length + /// D1 * ... * DN, in the input. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sparse_indices : + /// sparse_values : + /// sparse_shapes : + /// dense_values : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
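+ ///
+ /// As a usage sketch (illustrative only: the tensors serialized_example and
+ /// age_default, and the feature names "age" and "tags", are assumptions, not
+ /// part of this API), parsing one sparse string feature and one dense float
+ /// feature of shape [1] might look like:
+ ///
+ /// var (sp_idx, sp_vals, sp_shapes, dense_vals) = gen_ops.parse_single_example(
+ ///     serialized: serialized_example,          // scalar string tensor
+ ///     dense_defaults: new[] { age_default },   // empty tensor => "age" is required
+ ///     num_sparse: 1,
+ ///     sparse_keys: new[] { "tags" },
+ ///     dense_keys: new[] { "age" },
+ ///     sparse_types: new[] { TF_DataType.TF_STRING },
+ ///     dense_shapes: new[] { new TensorShape(1) });
+ ///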
+ /// + public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_single_example (Tensor serialized, Tensor[] dense_defaults, int num_sparse, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, TensorShape[] dense_shapes, string name = "ParseSingleExample") + { + var dict = new Dictionary(); + dict["serialized"] = serialized; + dict["dense_defaults"] = dense_defaults; + dict["num_sparse"] = num_sparse; + dict["sparse_keys"] = sparse_keys; + dict["dense_keys"] = dense_keys; + dict["sparse_types"] = sparse_types; + dict["dense_shapes"] = dense_shapes; + var op = _op_def_lib._apply_op_helper("ParseSingleExample", name: name, keywords: dict); + int _idx = 0; + var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var sparse_shapes = Enumerable.Range(0, op.OutputListLength("sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var dense_values = Enumerable.Range(0, op.OutputListLength("dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (sparse_indices, sparse_values, sparse_shapes, dense_values); + } + + /// + /// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. + /// + /// + /// A scalar containing a binary serialized SequenceExample proto. + /// + /// + /// A vector listing the + /// FeatureList keys which may be missing from the SequenceExample. If the + /// associated FeatureList is missing, it is treated as empty. By default, + /// any FeatureList not listed in this vector must exist in the SequenceExample. + /// + /// + /// A list of Ncontext_sparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with context_sparse + /// values. + /// + /// + /// A list of Ncontext_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' context features associated with + /// dense values. + /// + /// + /// A list of Nfeature_list_sparse string Tensors + /// (scalars). The keys expected in the FeatureLists associated with sparse + /// values. + /// + /// + /// A list of Nfeature_list_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' feature_lists associated + /// with lists of dense values. + /// + /// + /// A list of Ncontext_dense Tensors (some may be empty). + /// context_dense_defaults[j] provides default values + /// when the SequenceExample's context map lacks context_dense_key[j]. + /// If an empty Tensor is provided for context_dense_defaults[j], + /// then the Feature context_dense_keys[j] is required. + /// The input type is inferred from context_dense_defaults[j], even when it's + /// empty. If context_dense_defaults[j] is not empty, its shape must match + /// context_dense_shapes[j]. + /// + /// + /// A scalar containing the name of the serialized proto. + /// May contain, for example, table key (descriptive) name for the + /// corresponding serialized proto. This is purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty scalar if no name is available. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseSingleSequenceExample'. 
+ /// + /// + /// A list of Ncontext_sparse types; the data types of data in + /// each context Feature given in context_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// + /// + /// A list of Ncontext_dense shapes; the shapes of data in + /// each context Feature given in context_dense_keys. + /// The number of elements in the Feature corresponding to context_dense_key[j] + /// must always equal context_dense_shapes[j].NumEntries(). + /// The shape of context_dense_values[j] will match context_dense_shapes[j]. + /// + /// + /// A list of Nfeature_list_sparse types; the data types + /// of data in each FeatureList given in feature_list_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// + /// + /// A list of Nfeature_list_dense shapes; the shapes of + /// data in each FeatureList given in feature_list_dense_keys. + /// The shape of each Feature in the FeatureList corresponding to + /// feature_list_dense_key[j] must always equal + /// feature_list_dense_shapes[j].NumEntries(). + /// + /// + /// Returns a tuple with multiple values, as follows: + /// context_sparse_indices : + /// context_sparse_values : + /// context_sparse_shapes : + /// context_dense_values : + /// feature_list_sparse_indices : + /// feature_list_sparse_values : + /// feature_list_sparse_shapes : + /// feature_list_dense_values : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values) parse_single_sequence_example (Tensor serialized, Tensor feature_list_dense_missing_assumed_empty, Tensor[] context_sparse_keys, Tensor[] context_dense_keys, Tensor[] feature_list_sparse_keys, Tensor[] feature_list_dense_keys, Tensor[] context_dense_defaults, Tensor debug_name, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, TensorShape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, TensorShape[] feature_list_dense_shapes = null, string name = "ParseSingleSequenceExample") + { + var dict = new Dictionary(); + dict["serialized"] = serialized; + dict["feature_list_dense_missing_assumed_empty"] = feature_list_dense_missing_assumed_empty; + dict["context_sparse_keys"] = context_sparse_keys; + dict["context_dense_keys"] = context_dense_keys; + dict["feature_list_sparse_keys"] = feature_list_sparse_keys; + dict["feature_list_dense_keys"] = feature_list_dense_keys; + dict["context_dense_defaults"] = context_dense_defaults; + dict["debug_name"] = debug_name; + if (context_sparse_types != null) + dict["context_sparse_types"] = context_sparse_types; + if (feature_list_dense_types != null) + dict["feature_list_dense_types"] = feature_list_dense_types; + if (context_dense_shapes != null) + dict["context_dense_shapes"] = context_dense_shapes; + if (feature_list_sparse_types != null) + dict["feature_list_sparse_types"] = feature_list_sparse_types; + if (feature_list_dense_shapes != null) + dict["feature_list_dense_shapes"] = feature_list_dense_shapes; + var op = 
_op_def_lib._apply_op_helper("ParseSingleSequenceExample", name: name, keywords: dict); + int _idx = 0; + var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_sparse_shapes = Enumerable.Range(0, op.OutputListLength("context_sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var context_dense_values = Enumerable.Range(0, op.OutputListLength("context_dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_indices = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_values = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_sparse_shapes = Enumerable.Range(0, op.OutputListLength("feature_list_sparse_shapes")).Select(_ => op.outputs[_idx++]).ToArray(); + var feature_list_dense_values = Enumerable.Range(0, op.OutputListLength("feature_list_dense_values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values); + } + + /// + /// Transforms a serialized tensorflow.TensorProto proto into a Tensor. + /// + /// + /// A scalar string containing a serialized TensorProto proto. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ParseTensor'. + /// + /// + /// Optional argument + /// The type of the serialized tensor. The provided type must match the + /// type of the serialized tensor and no implicit conversion will take place. + /// + /// + /// A Tensor of type out_type. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor parse_tensor (Tensor serialized, TF_DataType out_type, string name = "ParseTensor") + { + var dict = new Dictionary(); + dict["serialized"] = serialized; + dict["out_type"] = out_type; + var op = _op_def_lib._apply_op_helper("ParseTensor", name: name, keywords: dict); + return op.output; + } + + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Placeholder'. + /// + /// + /// Optional argument + /// The type of elements in the tensor. + /// + /// + /// (Optional) The shape of the tensor. If the shape has 0 dimensions, the + /// shape is unconstrained. + /// + /// + /// A placeholder tensor that must be replaced using the feed mechanism. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. 
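+ ///
+ /// A brief sketch (the surrounding graph and session plumbing are assumed and
+ /// elided): declare a float placeholder whose batch dimension is left
+ /// unconstrained, then supply the value through the feed mechanism at run time.
+ ///
+ /// var x = gen_ops.placeholder(TF_DataType.TF_FLOAT, new TensorShape(-1, 784));
+ /// // Running any op that depends on x without feeding it raises an error,
+ /// // which is exactly the behavior described above.
+ ///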
+ /// + public static Tensor placeholder (TF_DataType dtype, TensorShape shape = null, string name = "Placeholder") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + if (shape != null) + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("Placeholder", name: name, keywords: dict); + return op.output; + } + + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PlaceholderV2'. + /// + /// + /// Optional argument + /// The type of elements in the tensor. + /// + /// + /// Optional argument + /// The shape of the tensor. The shape can be any partially-specified + /// shape. To be unconstrained, pass in a shape with unknown rank. + /// + /// + /// A placeholder tensor that must be replaced using the feed mechanism. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + public static Tensor placeholder_v2 (TF_DataType dtype, TensorShape shape, string name = "PlaceholderV2") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("PlaceholderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// A placeholder op that passes through input when its output is not fed. + /// + /// + /// The default value to produce when output is not fed. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PlaceholderWithDefault'. + /// + /// + /// Optional argument + /// The (possibly partial) shape of the tensor. + /// + /// + /// A placeholder tensor that defaults to input if it is not fed. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor placeholder_with_default (Tensor input, TensorShape shape, string name = "PlaceholderWithDefault") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("PlaceholderWithDefault", name: name, keywords: dict); + return op.output; + } + + /// + /// Compute the polygamma function \\(\psi^{(n)}(x)\\). + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Polygamma'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The polygamma function is defined as: + /// + /// + /// \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\) + /// + /// where \\(\psi(x)\\) is the digamma function. + /// + public static Tensor polygamma (Tensor a, Tensor x, string name = "Polygamma") + { + var dict = new Dictionary(); + dict["a"] = a; + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Polygamma", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PopulationCount'. 
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// For each entry in x, calculates the number of 1 (on) bits in the binary + /// representation of that entry. + /// + /// **NOTE**: It is more efficient to first tf.bitcast your tensors into + /// int32 or int64 and perform the bitcount on the result, than to feed in + /// 8- or 16-bit inputs and then aggregate the resulting counts. + /// + public static Tensor population_count (Tensor x, string name = "PopulationCount") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("PopulationCount", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the power of one value to another. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Pow'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor x and a tensor y, this operation computes \\(x^y\\) for + /// corresponding elements in x and y. For example: + /// + /// + /// # tensor 'x' is [[2, 2], [3, 3]] + /// # tensor 'y' is [[8, 16], [2, 3]] + /// tf.pow(x, y) ==> [[256, 65536], [9, 27]] + /// + /// + public static Tensor pow (Tensor x, Tensor y, string name = "Pow") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Pow", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that asynchronously prefetches elements from input_dataset. + /// + /// + /// + /// + /// The maximum number of elements to buffer in an iterator over + /// this dataset. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PrefetchDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor prefetch_dataset (Tensor input_dataset, Tensor buffer_size, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "PrefetchDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["buffer_size"] = buffer_size; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("PrefetchDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// An identity op that triggers an error if a gradient is requested. + /// + /// + /// any tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PreventGradient'. + /// + /// + /// Will be printed in the error when anyone tries to differentiate + /// this operation. + /// + /// + /// the same input tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// When executed in a graph, this op outputs its input tensor as-is. + /// + /// When building ops to compute gradients, the TensorFlow gradient system + /// will return an error when trying to look up the gradient of this op, + /// because no gradient must ever be registered for this function. This + /// op exists to prevent subtle bugs from silently returning unimplemented + /// gradients in some corner cases.
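+ ///
+ /// A short sketch of the intended pattern (the tensor intermediate and the op
+ /// name in the message are illustrative assumptions):
+ ///
+ /// // Any later attempt to differentiate through guarded fails loudly with
+ /// // the given message instead of silently yielding a wrong gradient.
+ /// var guarded = gen_ops.prevent_gradient(intermediate,
+ ///     message: "gradient for my_custom_op is intentionally unavailable");
+ ///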
+ /// + public static Tensor prevent_gradient (Tensor input, string message = null, string name = "PreventGradient") + { + var dict = new Dictionary(); + dict["input"] = input; + if (message != null) + dict["message"] = message; + var op = _op_def_lib._apply_op_helper("PreventGradient", name: name, keywords: dict); + return op.output; + } + + /// + /// Prints a list of tensors. + /// + /// + /// The tensor passed to output + /// + /// + /// A list of tensors to print out when op is evaluated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Print'. + /// + /// + /// A string, prefix of the error message. + /// + /// + /// Only log first_n number of times. -1 disables logging. + /// + /// + /// Only print this many entries of each tensor. + /// + /// + /// = The unmodified input tensor + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Passes input through to output and prints data when evaluating. + /// + public static Tensor print (Tensor input, Tensor[] data, string message = null, int? first_n = null, int? summarize = null, string name = "Print") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["data"] = data; + if (message != null) + dict["message"] = message; + if (first_n.HasValue) + dict["first_n"] = first_n.Value; + if (summarize.HasValue) + dict["summarize"] = summarize.Value; + var op = _op_def_lib._apply_op_helper("Print", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements sorted by the first component value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PriorityQueue'. + /// + /// + /// Optional argument + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The type of each component in a value. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that the PriorityQueue requires the first component of any element + /// to be a scalar int64, in addition to the other elements declared by + /// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue + /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra + /// entry in their input (resp. output) lists. + /// + public static Tensor priority_queue (TensorShape[] shapes, TF_DataType[] component_types = null, int? 
capacity = null, string container = null, string shared_name = null, string name = "PriorityQueue") + { + var dict = new Dictionary(); + dict["shapes"] = shapes; + if (component_types != null) + dict["component_types"] = component_types; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("PriorityQueue", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that produces elements sorted by the first component value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'PriorityQueueV2'. + /// + /// + /// Optional argument + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The type of each component in a value. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Note that the PriorityQueue requires the first component of any element + /// to be a scalar int64, in addition to the other elements declared by + /// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue + /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra + /// entry in their input (resp. output) lists. + /// + public static Tensor priority_queue_v2 (TensorShape[] shapes, TF_DataType[] component_types = null, int? capacity = null, string container = null, string shared_name = null, string name = "PriorityQueueV2") + { + var dict = new Dictionary(); + dict["shapes"] = shapes; + if (component_types != null) + dict["component_types"] = component_types; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("PriorityQueueV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the product of elements across dimensions of a tensor. + /// + /// + /// The tensor to reduce. + /// + /// + /// The dimensions to reduce. Must be in the range + /// [-rank(input), rank(input)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Prod'. + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// The reduced tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Reduces input along the dimensions given in axis. Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// axis. If keep_dims is true, the reduced dimensions are + /// retained with length 1. + /// + public static Tensor prod (Tensor input, Tensor reduction_indices, bool? 
keep_dims = null, string name = "Prod") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["reduction_indices"] = reduction_indices; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + var op = _op_def_lib._apply_op_helper("Prod", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the QR decompositions of one or more matrices. + /// + /// + /// A tensor of shape [..., M, N] whose inner-most 2 dimensions + /// form matrices of size [M, N]. Let P be the minimum of M and N. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Qr'. + /// + /// + /// If true, compute full-sized q and r. If false + /// (the default), compute only the leading P columns of q. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// q : Orthonormal basis for range of a. If full_matrices is False then + /// shape is [..., M, P]; if full_matrices is True then shape is + /// [..., M, M]. + /// r : Triangular factor. If full_matrices is False then shape is + /// [..., P, N]. If full_matrices is True then shape is [..., M, N]. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Computes the QR decomposition of each inner matrix in tensor such that + /// tensor[..., :, :] = q[..., :, :] * r[..., :, :] + /// + /// + /// # a is a tensor. + /// # q is a tensor of orthonormal matrices. + /// # r is a tensor of upper triangular matrices. + /// q, r = qr(a) + /// q_full, r_full = qr(a, full_matrices=True) + /// + /// + public static (Tensor q, Tensor r) qr (Tensor input, bool? full_matrices = null, string name = "Qr") + { + var dict = new Dictionary(); + dict["input"] = input; + if (full_matrices.HasValue) + dict["full_matrices"] = full_matrices.Value; + var op = _op_def_lib._apply_op_helper("Qr", name: name, keywords: dict); + int _idx = 0; + var q = op.outputs[_idx++]; + var r = op.outputs[_idx++]; + return (q, r); + } + + /// + /// Use QuantizeAndDequantizeV2 instead. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeAndDequantize'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor quantize_and_dequantize (Tensor input, bool? signed_input = null, int? num_bits = null, bool? range_given = null, float? input_min = null, float? input_max = null, string name = "QuantizeAndDequantize") + { + var dict = new Dictionary(); + dict["input"] = input; + if (signed_input.HasValue) + dict["signed_input"] = signed_input.Value; + if (num_bits.HasValue) + dict["num_bits"] = num_bits.Value; + if (range_given.HasValue) + dict["range_given"] = range_given.Value; + if (input_min.HasValue) + dict["input_min"] = input_min.Value; + if (input_max.HasValue) + dict["input_max"] = input_max.Value; + var op = _op_def_lib._apply_op_helper("QuantizeAndDequantize", name: name, keywords: dict); + return op.output; + } + + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// Tensor to quantize and then dequantize. + /// + /// + /// If range_given == True, this specifies the minimum input value that needs to + /// be represented, otherwise it is determined from the min value of the input + /// tensor.
+ /// + /// + /// If range_given == True, this specifies the maximum input value that needs to + /// be represented, otherwise it is determined from the max value of the input + /// tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeAndDequantizeV2'. + /// + /// + /// Whether the quantization is signed or unsigned. (actually this parameter should + /// have been called 'signed_output') + /// + /// + /// The bitwidth of the quantization. + /// + /// + /// Whether the range is given or should be determined from the input tensor. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op simulates the precision loss from the quantized forward pass by: + /// + /// 1. Quantizing the tensor to fixed point numbers, which should match the target + /// quantization method when it is used in inference. + /// 2. Dequantizing it back to floating point numbers for the following ops, most + /// likely matmul. + /// + /// There are different ways to quantize. This version uses only scaling, so 0.0 + /// maps to 0. + /// + /// From the specified 'num_bits' in the quantized output type, it determines + /// minimum and maximum representable quantized values. + /// + /// e.g. + /// + /// * [-128, 127] for signed, num_bits = 8, or + /// * [0, 255] for unsigned, num_bits = 8. + /// + /// If range_given == False, the initial input_min, input_max will be determined + /// automatically as the minimum and maximum values in the input tensor, otherwise + /// the specified values of input_min, input_max are used. + /// + /// Note: If the input_min, input_max are specified, they do not need to equal the + /// actual minimum and maximum values in the tensor. e.g. in some cases it may be + /// beneficial to specify these values such that the low probability extremes of the + /// input distribution are clipped. + /// + /// This op determines the maximum scale_factor that would map the initial + /// [input_min, input_max] range to a range that lies within the representable + /// quantized range. + /// + /// It determines the scale from one of input_min and input_max, then updates the + /// other one to maximize the representable range. + /// + /// e.g. + /// + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it + /// would update input_max to be 127 / 12.8 = 9.921875 + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it + /// would update input_min to be -128.0 / 12.7 = -10.07874 + /// * if the output is unsigned, input_min is forced to be 0, and only the + /// specified input_max is used. + /// + /// After determining the scale_factor and updating the input range, it applies the + /// following to each value in the 'input' tensor. + /// + /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. + /// + /// + public static Tensor quantize_and_dequantize_v2 (Tensor input, Tensor input_min, Tensor input_max, bool? signed_input = null, int? num_bits = null, bool?
range_given = null, string name = "QuantizeAndDequantizeV2") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_min"] = input_min; + dict["input_max"] = input_max; + if (signed_input.HasValue) + dict["signed_input"] = signed_input.Value; + if (num_bits.HasValue) + dict["num_bits"] = num_bits.Value; + if (range_given.HasValue) + dict["range_given"] = range_given.Value; + var op = _op_def_lib._apply_op_helper("QuantizeAndDequantizeV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeAndDequantizeV3'. + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + /// tensor, so its value can change during training. + /// + public static Tensor quantize_and_dequantize_v3 (Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool? signed_input = null, bool? range_given = null, string name = "QuantizeAndDequantizeV3") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_min"] = input_min; + dict["input_max"] = input_max; + dict["num_bits"] = num_bits; + if (signed_input.HasValue) + dict["signed_input"] = signed_input.Value; + if (range_given.HasValue) + dict["range_given"] = range_given.Value; + var op = _op_def_lib._apply_op_helper("QuantizeAndDequantizeV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Convert the quantized 'input' tensor into a lower-precision 'output', using the + /// + /// + /// + /// + /// The float value that the minimum quantized input value represents. + /// + /// + /// The float value that the maximum quantized input value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeDownAndShrinkRange'. + /// + /// + /// Optional argument + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// output_min : The float value that the minimum quantized output value represents. + /// output_max : The float value that the maximum quantized output value represents. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// actual distribution of the values to maximize the usage of the lower bit depth + /// and adjusting the output min and max ranges accordingly. + /// + /// [input_min, input_max] are scalar floats that specify the range for the float + /// interpretation of the 'input' data. For example, if input_min is -1.0f and + /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// This operator tries to squeeze as much precision as possible into an output with + /// a lower bit depth by calculating the actual min and max values found in the + /// data. For example, maybe that quint16 input has no values lower than 16,384 and + /// none higher than 49,152. 
That means only half the range is actually needed, all + /// the float interpretations are between -0.5f and 0.5f, so if we want to compress + /// the data into a quint8 output, we can use that range rather than the theoretical + /// -1.0f to 1.0f that is suggested by the input min and max. + /// + /// In practice, this is most useful for taking output from operations like + /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + /// may have large potential output ranges, but in practice have a distribution of + /// input values that only uses a small fraction of the possible range. By feeding + /// that output into this operator, we can reduce it from 32 bits down to 8 with + /// minimal loss of accuracy. + /// + public static (Tensor output, Tensor output_min, Tensor output_max) quantize_down_and_shrink_range (Tensor input, Tensor input_min, Tensor input_max, TF_DataType out_type, string name = "QuantizeDownAndShrinkRange") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_min"] = input_min; + dict["input_max"] = input_max; + dict["out_type"] = out_type; + var op = _op_def_lib._apply_op_helper("QuantizeDownAndShrinkRange", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var output_min = op.outputs[_idx++]; + var output_max = op.outputs[_idx++]; + return (output, output_min, output_max); + } + + /// + /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + /// + /// + /// + /// + /// The minimum scalar value possibly produced for the input. + /// + /// + /// The maximum scalar value possibly produced for the input. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeV2'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : The quantized data produced from the float input. + /// output_min : The actual minimum scalar value used for the output. + /// output_max : The actual maximum scalar value used for the output. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// [min_range, max_range] are scalar floats that specify the range for + /// the 'input' data. The 'mode' attribute controls exactly which calculations are + /// used to convert the float values to their quantized equivalents. The + /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used + /// when rounding float values to their quantized equivalents. + /// + /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + /// + /// + /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + /// if T == qint8, out[i] -= (range(T) + 1) / 2.0 + /// + /// + /// here range(T) = numeric_limits<T>::max() - numeric_limits<T>::min() + /// + /// *MIN_COMBINED Mode Example* + /// + /// Assume the input is type float and has a possible range of [0.0, 6.0] and the + /// output type is quint8 ([0, 255]). The min_range and max_range values should be + /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + /// value of the input by 255/6 and cast to quint8. + /// + /// If the output type was qint8 ([-128, 127]), the operation will additionally + /// subtract each value by 128 prior to casting, so that the range of values aligns + /// with the range of qint8. 
+ /// + /// If the mode is 'MIN_FIRST', then this approach is used: + /// + /// + /// num_discrete_values = 1 << (# of bits in T) + /// range_adjust = num_discrete_values / (num_discrete_values - 1) + /// range = (range_max - range_min) * range_adjust + /// range_scale = num_discrete_values / range + /// quantized = round(input * range_scale) - round(range_min * range_scale) + + /// numeric_limits<T>::min() + /// quantized = max(quantized, numeric_limits<T>::min()) + /// quantized = min(quantized, numeric_limits<T>::max()) + /// + /// + /// The biggest difference between this and MIN_COMBINED is that the minimum range + /// is rounded first, before it's subtracted from the rounded value. With + /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + /// and dequantizing will introduce a larger and larger error. + /// + /// *SCALED mode Example* + /// + /// SCALED mode matches the quantization approach used in + /// QuantizeAndDequantize{V2|V3}. + /// + /// If the mode is SCALED, we do not use the full range of the output type, + /// choosing to elide the lowest possible value for symmetry (e.g., output range is + /// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to + /// 0. + /// + /// We first find the range of values in our tensor. The + /// range we use is always centered on 0, so we find m such that + /// + /// + /// m = max(abs(input_min), abs(input_max)) + /// + /// + /// Our input tensor range is then [-m, m]. + /// + /// Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. + /// If T is signed, this is + /// + /// + /// num_bits = sizeof(T) * 8 + /// [min_fixed, max_fixed] = + /// [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1] + /// + /// + /// Otherwise, if T is unsigned, the fixed-point range is + /// + /// + /// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1] + /// + /// + /// From this we compute our scaling factor, s: + /// + /// + /// s = (max_fixed - min_fixed) / (2 * m) + /// + /// + /// Now we can quantize the elements of our tensor: + /// + /// + /// result = round(input * s) + /// + /// + /// One thing to watch out for is that the operator may choose to adjust the + /// requested minimum and maximum values slightly during the quantization process, + /// so you should always use the output ports as the range for further calculations. + /// For example, if the requested minimum and maximum values are close to equal, + /// they will be separated by a small epsilon value to prevent ill-formed quantized + /// buffers from being created. Otherwise, you can end up with buffers where all the + /// quantized values map to the same float value, which causes problems for + /// operations that have to perform further calculations on them.
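+ ///
+ /// As a worked SCALED-mode example (numbers chosen purely for illustration):
+ /// with a signed 8-bit output and an input range of [-3.0, 6.0], we get
+ /// m = max(abs(-3.0), abs(6.0)) = 6.0, fixed-point buckets [-127, 127], and
+ /// s = (127 - (-127)) / (2 * 6.0) = 254 / 12 ~= 21.17, so an input value of
+ /// 1.5 quantizes to round(1.5 * 21.17) = 32.
+ ///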
+ /// + public static (Tensor output, Tensor output_min, Tensor output_max) quantize_v2 (Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode = null, string round_mode = null, string name = "QuantizeV2") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["min_range"] = min_range; + dict["max_range"] = max_range; + dict["T"] = T; + if (mode != null) + dict["mode"] = mode; + if (round_mode != null) + dict["round_mode"] = round_mode; + var op = _op_def_lib._apply_op_helper("QuantizeV2", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var output_min = op.outputs[_idx++]; + var output_max = op.outputs[_idx++]; + return (output, output_min, output_max); + } + + /// + /// Returns x + y element-wise, working on quantized buffers. + /// + /// + /// + /// + /// + /// + /// The float value that the lowest quantized x value represents. + /// + /// + /// The float value that the highest quantized x value represents. + /// + /// + /// The float value that the lowest quantized y value represents. + /// + /// + /// The float value that the highest quantized y value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedAdd'. + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// z : + /// min_z : The float value that the lowest quantized output value represents. + /// max_z : The float value that the highest quantized output value represents. + /// + /// *NOTE*: QuantizedAdd supports limited forms of broadcasting. More about + /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor z, Tensor min_z, Tensor max_z) quantized_add (Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType? Toutput = null, string name = "QuantizedAdd") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + dict["min_x"] = min_x; + dict["max_x"] = max_x; + dict["min_y"] = min_y; + dict["max_y"] = max_y; + if (Toutput.HasValue) + dict["Toutput"] = Toutput.Value; + var op = _op_def_lib._apply_op_helper("QuantizedAdd", name: name, keywords: dict); + int _idx = 0; + var z = op.outputs[_idx++]; + var min_z = op.outputs[_idx++]; + var max_z = op.outputs[_idx++]; + return (z, min_z, max_z); + } + + /// + /// Produces the average pool of the input tensor for quantized types. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// The float value that the lowest quantized input value represents. + /// + /// + /// The float value that the highest quantized input value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedAvgPool'. + /// + /// + /// Optional argument + /// The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. 
+ /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// min_output : The float value that the lowest quantized output value represents. + /// max_output : The float value that the highest quantized output value represents. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor output, Tensor min_output, Tensor max_output) quantized_avg_pool (Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name = "QuantizedAvgPool") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["min_input"] = min_input; + dict["max_input"] = max_input; + dict["ksize"] = ksize; + dict["strides"] = strides; + dict["padding"] = padding; + var op = _op_def_lib._apply_op_helper("QuantizedAvgPool", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var min_output = op.outputs[_idx++]; + var max_output = op.outputs[_idx++]; + return (output, min_output, max_output); + } + + /// + /// Quantized Batch normalization. + /// + /// + /// A 4D input Tensor. + /// + /// + /// The value represented by the lowest quantized input. + /// + /// + /// The value represented by the highest quantized input. + /// + /// + /// A 1D mean Tensor with size matching the last dimension of t. + /// This is the first output from tf.nn.moments, + /// or a saved moving average thereof. + /// + /// + /// The value represented by the lowest quantized mean. + /// + /// + /// The value represented by the highest quantized mean. + /// + /// + /// A 1D variance Tensor with size matching the last dimension of t. + /// This is the second output from tf.nn.moments, + /// or a saved moving average thereof. + /// + /// + /// The value represented by the lowest quantized variance. + /// + /// + /// The value represented by the highest quantized variance. + /// + /// + /// A 1D beta Tensor with size matching the last dimension of t. + /// An offset to be added to the normalized tensor. + /// + /// + /// The value represented by the lowest quantized offset. + /// + /// + /// The value represented by the highest quantized offset. + /// + /// + /// A 1D gamma Tensor with size matching the last dimension of t. + /// If "scale_after_normalization" is true, this tensor will be multiplied + /// with the normalized tensor. + /// + /// + /// The value represented by the lowest quantized gamma. + /// + /// + /// The value represented by the highest quantized gamma. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedBatchNormWithGlobalNormalization'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// A small float number to avoid dividing by 0. + /// + /// + /// Optional argument + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// result : + /// result_min : + /// result_max : + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// This op is deprecated and will be removed in the future. Prefer + /// tf.nn.batch_normalization. 
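+ ///
+ /// For reference, the computation this op performs on the dequantized values is
+ /// ordinary batch normalization (a restatement of the formula behind
+ /// tf.nn.batch_normalization, not a separate API):
+ ///
+ /// // normalized = (t - m) / sqrt(v + variance_epsilon)
+ /// // result     = scale_after_normalization ? normalized * gamma + beta
+ /// //                                        : normalized + beta
+ ///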
+ /// + public static (Tensor result, Tensor result_min, Tensor result_max) quantized_batch_norm_with_global_normalization (Tensor t, Tensor t_min, Tensor t_max, Tensor m, Tensor m_min, Tensor m_max, Tensor v, Tensor v_min, Tensor v_max, Tensor beta, Tensor beta_min, Tensor beta_max, Tensor gamma, Tensor gamma_min, Tensor gamma_max, TF_DataType out_type, float variance_epsilon, bool scale_after_normalization, string name = "QuantizedBatchNormWithGlobalNormalization") + { + var dict = new Dictionary(); + dict["t"] = t; + dict["t_min"] = t_min; + dict["t_max"] = t_max; + dict["m"] = m; + dict["m_min"] = m_min; + dict["m_max"] = m_max; + dict["v"] = v; + dict["v_min"] = v_min; + dict["v_max"] = v_max; + dict["beta"] = beta; + dict["beta_min"] = beta_min; + dict["beta_max"] = beta_max; + dict["gamma"] = gamma; + dict["gamma_min"] = gamma_min; + dict["gamma_max"] = gamma_max; + dict["out_type"] = out_type; + dict["variance_epsilon"] = variance_epsilon; + dict["scale_after_normalization"] = scale_after_normalization; + var op = _op_def_lib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name: name, keywords: dict); + int _idx = 0; + var result = op.outputs[_idx++]; + var result_min = op.outputs[_idx++]; + var result_max = op.outputs[_idx++]; + return (result, result_min, result_max); + } + + /// + /// Adds Tensor 'bias' to Tensor 'input' for Quantized types. + /// + /// + /// + /// + /// A 1D bias Tensor with size matching the last dimension of 'input'. + /// + /// + /// The float value that the lowest quantized input value represents. + /// + /// + /// The float value that the highest quantized input value represents. + /// + /// + /// The float value that the lowest quantized bias value represents. + /// + /// + /// The float value that the highest quantized bias value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedBiasAdd'. + /// + /// + /// Optional argument + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// min_out : The float value that the lowest quantized output value represents. + /// max_out : The float value that the highest quantized output value represents. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + /// + public static (Tensor output, Tensor min_out, Tensor max_out) quantized_bias_add (Tensor input, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_bias, Tensor max_bias, TF_DataType out_type, string name = "QuantizedBiasAdd") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["bias"] = bias; + dict["min_input"] = min_input; + dict["max_input"] = max_input; + dict["min_bias"] = min_bias; + dict["max_bias"] = max_bias; + dict["out_type"] = out_type; + var op = _op_def_lib._apply_op_helper("QuantizedBiasAdd", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var min_out = op.outputs[_idx++]; + var max_out = op.outputs[_idx++]; + return (output, min_out, max_out); + } + + /// + /// Concatenates quantized tensors along one dimension. + /// + /// + /// 0-D. The dimension along which to concatenate. Must be in the + /// range [0, rank(values)). + /// + /// + /// The N Tensors to concatenate. Their ranks and types must match, + /// and their sizes must match in all dimensions except concat_dim. 
+ /// + /// + /// The minimum scalar values for each of the input tensors. + /// + /// + /// The maximum scalar values for each of the input tensors. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedConcat'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : A Tensor with the concatenation of values stacked along the + /// concat_dim dimension. This tensor's shape matches that of values except + /// in concat_dim where it has the sum of the sizes. + /// output_min : The float value that the minimum quantized output value represents. + /// output_max : The float value that the maximum quantized output value represents. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor output, Tensor output_min, Tensor output_max) quantized_concat (Tensor concat_dim, Tensor[] values, Tensor[] input_mins, Tensor[] input_maxes, string name = "QuantizedConcat") + { + var dict = new Dictionary(); + dict["concat_dim"] = concat_dim; + dict["values"] = values; + dict["input_mins"] = input_mins; + dict["input_maxes"] = input_maxes; + var op = _op_def_lib._apply_op_helper("QuantizedConcat", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var output_min = op.outputs[_idx++]; + var output_max = op.outputs[_idx++]; + return (output, output_min, output_max); + } + + /// + /// Computes a 2D convolution given quantized 4D input and filter tensors. + /// + /// + /// + /// + /// filter's input_depth dimension must match input's depth dimensions. + /// + /// + /// The float value that the lowest quantized input value represents. + /// + /// + /// The float value that the highest quantized input value represents. + /// + /// + /// The float value that the lowest quantized filter value represents. + /// + /// + /// The float value that the highest quantized filter value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedConv2D'. + /// + /// + /// Optional argument + /// The stride of the sliding window for each dimension of the input + /// tensor. + /// + /// + /// Optional argument + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// input. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of data_format, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// min_output : The float value that the lowest quantized output value represents. + /// max_output : The float value that the highest quantized output value represents. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// The inputs are quantized tensors where the lowest value represents the real + /// number of the associated minimum, and the highest represents the maximum. + /// This means that you can only interpret the quantized output in the same way, by + /// taking the returned minimum and maximum values into account. 
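+ ///
+ /// A minimal call sketch (the quantized input/filter tensors and their
+ /// precomputed float ranges are assumptions supplied by the caller):
+ ///
+ /// var (out_q, out_min, out_max) = gen_ops.quantized_conv2d(
+ ///     input, filter, min_input, max_input, min_filter, max_filter,
+ ///     strides: new[] { 1, 1, 1, 1 },
+ ///     padding: "SAME",
+ ///     out_type: TF_DataType.TF_QINT32);
+ /// // out_min/out_max report the float range spanned by out_q's integer values;
+ /// // quantize_down_and_shrink_range can then requantize them to 8 bits.
+ ///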
+ ///
+ public static (Tensor output, Tensor min_output, Tensor max_output) quantized_conv2d (Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType? out_type = null, int[] dilations = null, string name = "QuantizedConv2D")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["filter"] = filter;
+ dict["min_input"] = min_input;
+ dict["max_input"] = max_input;
+ dict["min_filter"] = min_filter;
+ dict["max_filter"] = max_filter;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ if (out_type.HasValue)
+ dict["out_type"] = out_type.Value;
+ if (dilations != null)
+ dict["dilations"] = dilations;
+ var op = _op_def_lib._apply_op_helper("QuantizedConv2D", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var min_output = op.outputs[_idx++];
+ var max_output = op.outputs[_idx++];
+ return (output, min_output, max_output);
+ }
+
+ ///
+ /// Quantized instance normalization.
+ ///
+ ///
+ /// A 4D input Tensor.
+ ///
+ ///
+ /// The value represented by the lowest quantized input.
+ ///
+ ///
+ /// The value represented by the highest quantized input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedInstanceNorm'.
+ ///
+ ///
+ /// If true, given_y_min and given_y_max are used as the output range.
+ /// Otherwise, the implementation computes the output range.
+ ///
+ ///
+ /// The value to use as y_min if output_range_given is true.
+ ///
+ ///
+ /// The value to use as y_max if output_range_given is true.
+ ///
+ ///
+ /// A small float number to avoid dividing by 0.
+ ///
+ ///
+ /// Minimum value of y_max - y_min.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// y : A 4D Tensor.
+ /// y_min : The value represented by the lowest quantized output.
+ /// y_max : The value represented by the highest quantized output.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor y, Tensor y_min, Tensor y_max) quantized_instance_norm (Tensor x, Tensor x_min, Tensor x_max, bool? output_range_given = null, float? given_y_min = null, float? given_y_max = null, float? variance_epsilon = null, float? min_separation = null, string name = "QuantizedInstanceNorm")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["x_min"] = x_min;
+ dict["x_max"] = x_max;
+ if (output_range_given.HasValue)
+ dict["output_range_given"] = output_range_given.Value;
+ if (given_y_min.HasValue)
+ dict["given_y_min"] = given_y_min.Value;
+ if (given_y_max.HasValue)
+ dict["given_y_max"] = given_y_max.Value;
+ if (variance_epsilon.HasValue)
+ dict["variance_epsilon"] = variance_epsilon.Value;
+ if (min_separation.HasValue)
+ dict["min_separation"] = min_separation.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedInstanceNorm", name: name, keywords: dict);
+ int _idx = 0;
+ var y = op.outputs[_idx++];
+ var y_min = op.outputs[_idx++];
+ var y_max = op.outputs[_idx++];
+ return (y, y_min, y_max);
+ }
+
+ ///
+ /// Performs a quantized matrix multiplication of a by the matrix b.
+ ///
+ ///
+ /// Must be a two-dimensional tensor.
+ ///
+ ///
+ /// Must be a two-dimensional tensor.
+ ///
+ ///
+ /// The float value that the lowest quantized a value represents.
+ ///
+ ///
+ /// The float value that the highest quantized a value represents.
+ ///
+ ///
+ /// The float value that the lowest quantized b value represents.
+ ///
+ ///
+ /// The float value that the highest quantized b value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMatMul'.
+ ///
+ ///
+ ///
+ ///
+ /// If true, a is transposed before multiplication.
+ ///
+ ///
+ /// If true, b is transposed before multiplication.
+ ///
+ ///
+ /// The type of output produced by the activation function
+ /// following this operation.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output :
+ /// min_out : The float value that the lowest quantized output value represents.
+ /// max_out : The float value that the highest quantized output value represents.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// The inputs must be two-dimensional matrices and the inner dimension of
+ /// a (after being transposed if transpose_a is true) must match the
+ /// outer dimension of b (after being transposed if transpose_b is
+ /// true).
+ ///
+ public static (Tensor output, Tensor min_out, Tensor max_out) quantized_mat_mul (Tensor a, Tensor b, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType? Toutput = null, bool? transpose_a = null, bool? transpose_b = null, TF_DataType? Tactivation = null, string name = "QuantizedMatMul")
+ {
+ var dict = new Dictionary();
+ dict["a"] = a;
+ dict["b"] = b;
+ dict["min_a"] = min_a;
+ dict["max_a"] = max_a;
+ dict["min_b"] = min_b;
+ dict["max_b"] = max_b;
+ if (Toutput.HasValue)
+ dict["Toutput"] = Toutput.Value;
+ if (transpose_a.HasValue)
+ dict["transpose_a"] = transpose_a.Value;
+ if (transpose_b.HasValue)
+ dict["transpose_b"] = transpose_b.Value;
+ if (Tactivation.HasValue)
+ dict["Tactivation"] = Tactivation.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedMatMul", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var min_out = op.outputs[_idx++];
+ var max_out = op.outputs[_idx++];
+ return (output, min_out, max_out);
+ }
+
+ ///
+ /// Produces the max pool of the input tensor for quantized types.
+ ///
+ ///
+ /// The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
+ ///
+ ///
+ /// The float value that the lowest quantized input value represents.
+ ///
+ ///
+ /// The float value that the highest quantized input value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMaxPool'.
+ ///
+ ///
+ /// Optional argument
+ /// The size of the window for each dimension of the input tensor.
+ /// The length must be 4 to match the number of dimensions of the input.
+ ///
+ ///
+ /// Optional argument
+ /// The stride of the sliding window for each dimension of the input
+ /// tensor. The length must be 4 to match the number of dimensions of the input.
+ ///
+ ///
+ /// Optional argument
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output :
+ /// min_output : The float value that the lowest quantized output value represents.
+ /// max_output : The float value that the highest quantized output value represents.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
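+ ///
+ /// A minimal illustrative call (input and its quantization-range tensors are
+ /// assumed to be defined elsewhere), using a 2x2 window with stride 2:
+ ///
+ /// var (p, pMin, pMax) = gen_ops.quantized_max_pool(input, minInput, maxInput,
+ /// ksize: new[] {1, 2, 2, 1}, strides: new[] {1, 2, 2, 1}, padding: "VALID");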
+ ///
+ public static (Tensor output, Tensor min_output, Tensor max_output) quantized_max_pool (Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name = "QuantizedMaxPool")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["min_input"] = min_input;
+ dict["max_input"] = max_input;
+ dict["ksize"] = ksize;
+ dict["strides"] = strides;
+ dict["padding"] = padding;
+ var op = _op_def_lib._apply_op_helper("QuantizedMaxPool", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var min_output = op.outputs[_idx++];
+ var max_output = op.outputs[_idx++];
+ return (output, min_output, max_output);
+ }
+
+ ///
+ /// Returns x * y element-wise, working on quantized buffers.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The float value that the lowest quantized x value represents.
+ ///
+ ///
+ /// The float value that the highest quantized x value represents.
+ ///
+ ///
+ /// The float value that the lowest quantized y value represents.
+ ///
+ ///
+ /// The float value that the highest quantized y value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMul'.
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// z :
+ /// min_z : The float value that the lowest quantized output value represents.
+ /// max_z : The float value that the highest quantized output value represents.
+ ///
+ /// *NOTE*: QuantizedMul supports limited forms of broadcasting. More about
+ /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor z, Tensor min_z, Tensor max_z) quantized_mul (Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType? Toutput = null, string name = "QuantizedMul")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ dict["min_x"] = min_x;
+ dict["max_x"] = max_x;
+ dict["min_y"] = min_y;
+ dict["max_y"] = max_y;
+ if (Toutput.HasValue)
+ dict["Toutput"] = Toutput.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedMul", name: name, keywords: dict);
+ int _idx = 0;
+ var z = op.outputs[_idx++];
+ var min_z = op.outputs[_idx++];
+ var max_z = op.outputs[_idx++];
+ return (z, min_z, max_z);
+ }
+
+ ///
+ /// Computes Quantized Rectified Linear: max(features, 0)
+ ///
+ ///
+ ///
+ ///
+ /// The float value that the lowest quantized value represents.
+ ///
+ ///
+ /// The float value that the highest quantized value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedRelu'.
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// activations : Has the same output shape as "features".
+ /// min_activations : The float value that the lowest quantized value represents.
+ /// max_activations : The float value that the highest quantized value represents.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor activations, Tensor min_activations, Tensor max_activations) quantized_relu (Tensor features, Tensor min_features, Tensor max_features, TF_DataType? out_type = null, string name = "QuantizedRelu")
+ {
+ var dict = new Dictionary();
+ dict["features"] = features;
+ dict["min_features"] = min_features;
+ dict["max_features"] = max_features;
+ if (out_type.HasValue)
+ dict["out_type"] = out_type.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedRelu", name: name, keywords: dict);
+ int _idx = 0;
+ var activations = op.outputs[_idx++];
+ var min_activations = op.outputs[_idx++];
+ var max_activations = op.outputs[_idx++];
+ return (activations, min_activations, max_activations);
+ }
+
+ ///
+ /// Computes Quantized Rectified Linear 6: min(max(features, 0), 6)
+ ///
+ ///
+ ///
+ ///
+ /// The float value that the lowest quantized value represents.
+ ///
+ ///
+ /// The float value that the highest quantized value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedRelu6'.
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// activations : Has the same output shape as "features".
+ /// min_activations : The float value that the lowest quantized value represents.
+ /// max_activations : The float value that the highest quantized value represents.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor activations, Tensor min_activations, Tensor max_activations) quantized_relu6 (Tensor features, Tensor min_features, Tensor max_features, TF_DataType? out_type = null, string name = "QuantizedRelu6")
+ {
+ var dict = new Dictionary();
+ dict["features"] = features;
+ dict["min_features"] = min_features;
+ dict["max_features"] = max_features;
+ if (out_type.HasValue)
+ dict["out_type"] = out_type.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedRelu6", name: name, keywords: dict);
+ int _idx = 0;
+ var activations = op.outputs[_idx++];
+ var min_activations = op.outputs[_idx++];
+ var max_activations = op.outputs[_idx++];
+ return (activations, min_activations, max_activations);
+ }
+
+ ///
+ /// Computes Quantized Rectified Linear X: min(max(features, 0), max_value)
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The float value that the lowest quantized value represents.
+ ///
+ ///
+ /// The float value that the highest quantized value represents.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedReluX'.
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// activations : Has the same output shape as "features".
+ /// min_activations : The float value that the lowest quantized value represents.
+ /// max_activations : The float value that the highest quantized value represents.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ public static (Tensor activations, Tensor min_activations, Tensor max_activations) quantized_relu_x (Tensor features, Tensor max_value, Tensor min_features, Tensor max_features, TF_DataType? out_type = null, string name = "QuantizedReluX")
+ {
+ var dict = new Dictionary();
+ dict["features"] = features;
+ dict["max_value"] = max_value;
+ dict["min_features"] = min_features;
+ dict["max_features"] = max_features;
+ if (out_type.HasValue)
+ dict["out_type"] = out_type.Value;
+ var op = _op_def_lib._apply_op_helper("QuantizedReluX", name: name, keywords: dict);
+ int _idx = 0;
+ var activations = op.outputs[_idx++];
+ var min_activations = op.outputs[_idx++];
+ var max_activations = op.outputs[_idx++];
+ return (activations, min_activations, max_activations);
+ }
+
+ ///
+ /// Reshapes a quantized tensor as per the Reshape op.
+ ///
+ ///
+ ///
+ ///
+ /// Defines the shape of the output tensor.
+ ///
+ ///
+ /// The minimum value of the input.
+ ///
+ ///
+ /// The maximum value of the input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedReshape'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output :
+ /// output_min : This value is copied from input_min.
+ /// output_max : This value is copied from input_max.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ ///
+ ///
+ public static (Tensor output, Tensor output_min, Tensor output_max) quantized_reshape (Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string name = "QuantizedReshape")
+ {
+ var dict = new Dictionary();
+ dict["tensor"] = tensor;
+ dict["shape"] = shape;
+ dict["input_min"] = input_min;
+ dict["input_max"] = input_max;
+ var op = _op_def_lib._apply_op_helper("QuantizedReshape", name: name, keywords: dict);
+ int _idx = 0;
+ var output = op.outputs[_idx++];
+ var output_min = op.outputs[_idx++];
+ var output_max = op.outputs[_idx++];
+ return (output, output_min, output_max);
+ }
+
+ ///
+ /// Resize quantized images to size using quantized bilinear interpolation.
+ ///
+ ///
+ /// 4-D with shape [batch, height, width, channels].
+ ///
+ ///
+ /// A 1-D int32 Tensor of 2 elements: new_height, new_width. The
+ /// new size for the images.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedResizeBilinear'.
+ ///
+ ///
+ /// If true, the centers of the 4 corner pixels of the input and output tensors are
+ /// aligned, preserving the values at the corner pixels. Defaults to false.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// resized_images : 4-D with shape
+ /// [batch, new_height, new_width, channels].
+ /// out_min :
+ /// out_max :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Input images and output images must be quantized types.
+ ///
+ public static (Tensor resized_images, Tensor out_min, Tensor out_max) quantized_resize_bilinear (Tensor images, Tensor size, Tensor min, Tensor max, bool?
align_corners = null, string name = "QuantizedResizeBilinear") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["size"] = size; + dict["min"] = min; + dict["max"] = max; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("QuantizedResizeBilinear", name: name, keywords: dict); + int _idx = 0; + var resized_images = op.outputs[_idx++]; + var out_min = op.outputs[_idx++]; + var out_max = op.outputs[_idx++]; + return (resized_images, out_min, out_max); + } + + /// + /// Closes the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueClose'. + /// + /// + /// If true, all pending enqueue requests that are + /// blocked on the given queue will be canceled. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation signals that no more elements will be enqueued in the + /// given queue. Subsequent Enqueue(Many) operations will fail. + /// Subsequent Dequeue(Many) operations will continue to succeed if + /// sufficient elements remain in the queue. Subsequent Dequeue(Many) + /// operations that would block will fail immediately. + /// + public static Operation queue_close (Tensor handle, bool? cancel_pending_enqueues = null, string name = "QueueClose") + { + var dict = new Dictionary(); + dict["handle"] = handle; + if (cancel_pending_enqueues.HasValue) + dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value; + var op = _op_def_lib._apply_op_helper("QueueClose", name: name, keywords: dict); + return op; + } + + /// + /// Closes the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueCloseV2'. + /// + /// + /// If true, all pending enqueue requests that are + /// blocked on the given queue will be canceled. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation signals that no more elements will be enqueued in the + /// given queue. Subsequent Enqueue(Many) operations will fail. + /// Subsequent Dequeue(Many) operations will continue to succeed if + /// sufficient elements remain in the queue. Subsequent Dequeue(Many) + /// operations that would block will fail immediately. + /// + public static Operation queue_close_v2 (Tensor handle, bool? cancel_pending_enqueues = null, string name = "QueueCloseV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + if (cancel_pending_enqueues.HasValue) + dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value; + var op = _op_def_lib._apply_op_helper("QueueCloseV2", name: name, keywords: dict); + return op; + } + + /// + /// Dequeues a tuple of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeue'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue is empty, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
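+ ///
+ /// For example, for a queue holding (int32, float32) pairs, passing
+ /// component_types = new[] { TF_DataType.TF_INT32, TF_DataType.TF_FLOAT }
+ /// (an illustrative sketch) yields an array of two tensors per dequeue.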
+ /// + /// + /// This operation has k outputs, where k is the number of components + /// in the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + /// N.B. If the queue is empty, this operation will block until an element + /// has been dequeued (or 'timeout_ms' elapses, if specified). + /// + public static Tensor[] queue_dequeue (Tensor handle, TF_DataType[] component_types, int? timeout_ms = null, string name = "QueueDequeue") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeue", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Dequeues n tuples of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// The number of tuples to dequeue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueMany'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue has fewer than n elements, this operation + /// will block for up to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If the queue is closed and there are fewer than n elements, then an + /// OutOfRange error is returned. + /// + /// This operation concatenates queue-element component tensors along the + /// 0th dimension to make a single component tensor. All of the components + /// in the dequeued tuple will have size n in the 0th dimension. + /// + /// This operation has k outputs, where k is the number of components in + /// the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + /// N.B. If the queue is empty, this operation will block until n elements + /// have been dequeued (or 'timeout_ms' elapses, if specified). + /// + public static Tensor[] queue_dequeue_many (Tensor handle, Tensor n, TF_DataType[] component_types, int? timeout_ms = null, string name = "QueueDequeueMany") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["n"] = n; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeueMany", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Dequeues n tuples of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// The number of tuples to dequeue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueManyV2'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue has fewer than n elements, this operation + /// will block for up to timeout_ms milliseconds. + /// Note: This option is not supported yet. 
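+ ///
+ /// For example, dequeuing n = 32 from a queue of scalar float elements yields
+ /// a single float tensor of shape [32].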
+ /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If the queue is closed and there are fewer than n elements, then an + /// OutOfRange error is returned. + /// + /// This operation concatenates queue-element component tensors along the + /// 0th dimension to make a single component tensor. All of the components + /// in the dequeued tuple will have size n in the 0th dimension. + /// + /// This operation has k outputs, where k is the number of components in + /// the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + /// N.B. If the queue is empty, this operation will block until n elements + /// have been dequeued (or 'timeout_ms' elapses, if specified). + /// + public static Tensor[] queue_dequeue_many_v2 (Tensor handle, Tensor n, TF_DataType[] component_types, int? timeout_ms = null, string name = "QueueDequeueManyV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["n"] = n; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeueManyV2", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Dequeues n tuples of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// The number of tuples to dequeue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueUpTo'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue has fewer than n elements, this operation + /// will block for up to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation is not supported by all queues. If a queue does not support + /// DequeueUpTo, then an Unimplemented error is returned. + /// + /// If the queue is closed and there are more than 0 but less than n + /// elements remaining, then instead of returning an OutOfRange error like + /// QueueDequeueMany, less than n elements are returned immediately. If + /// the queue is closed and there are 0 elements left in the queue, then + /// an OutOfRange error is returned just like in QueueDequeueMany. + /// Otherwise the behavior is identical to QueueDequeueMany: + /// + /// This operation concatenates queue-element component tensors along the + /// 0th dimension to make a single component tensor. All of the components + /// in the dequeued tuple will have size n in the 0th dimension. + /// + /// This operation has k outputs, where k is the number of components in + /// the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + public static Tensor[] queue_dequeue_up_to (Tensor handle, Tensor n, TF_DataType[] component_types, int? 
timeout_ms = null, string name = "QueueDequeueUpTo") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["n"] = n; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeueUpTo", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Dequeues n tuples of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// The number of tuples to dequeue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueUpToV2'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue has fewer than n elements, this operation + /// will block for up to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation is not supported by all queues. If a queue does not support + /// DequeueUpTo, then an Unimplemented error is returned. + /// + /// If the queue is closed and there are more than 0 but less than n + /// elements remaining, then instead of returning an OutOfRange error like + /// QueueDequeueMany, less than n elements are returned immediately. If + /// the queue is closed and there are 0 elements left in the queue, then + /// an OutOfRange error is returned just like in QueueDequeueMany. + /// Otherwise the behavior is identical to QueueDequeueMany: + /// + /// This operation concatenates queue-element component tensors along the + /// 0th dimension to make a single component tensor. All of the components + /// in the dequeued tuple will have size n in the 0th dimension. + /// + /// This operation has k outputs, where k is the number of components in + /// the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + public static Tensor[] queue_dequeue_up_to_v2 (Tensor handle, Tensor n, TF_DataType[] component_types, int? timeout_ms = null, string name = "QueueDequeueUpToV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["n"] = n; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeueUpToV2", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Dequeues a tuple of one or more tensors from the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueV2'. + /// + /// + /// Optional argument + /// The type of each component in a tuple. + /// + /// + /// If the queue is empty, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// One or more tensors that were dequeued as a tuple. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
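+ ///
+ /// An illustrative graph-mode sketch (queueHandle is assumed to come from a
+ /// queue-creating op defined elsewhere):
+ ///
+ /// var parts = gen_ops.queue_dequeue_v2(queueHandle,
+ /// new[] { TF_DataType.TF_FLOAT, TF_DataType.TF_INT64 });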
+ /// + /// + /// This operation has k outputs, where k is the number of components + /// in the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + /// N.B. If the queue is empty, this operation will block until an element + /// has been dequeued (or 'timeout_ms' elapses, if specified). + /// + public static Tensor[] queue_dequeue_v2 (Tensor handle, TF_DataType[] component_types, int? timeout_ms = null, string name = "QueueDequeueV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["component_types"] = component_types; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueDequeueV2", name: name, keywords: dict); + int _idx = 0; + var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray(); + return (components); + } + + /// + /// Enqueues a tuple of one or more tensors in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// One or more tensors from which the enqueued tensors should be taken. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueue'. + /// + /// + /// If the queue is full, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// Returns the description of the operation + /// + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. If the queue is full, this operation will block until the given + /// element has been enqueued (or 'timeout_ms' elapses, if specified). + /// + public static Operation queue_enqueue (Tensor handle, Tensor[] components, int? timeout_ms = null, string name = "QueueEnqueue") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["components"] = components; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueEnqueue", name: name, keywords: dict); + return op; + } + + /// + /// Enqueues zero or more tuples of one or more tensors in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// One or more tensors from which the enqueued tensors should + /// be taken. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueMany'. + /// + /// + /// If the queue is too full, this operation will block for up + /// to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation slices each component tensor along the 0th dimension to + /// make multiple queue elements. All of the tuple components must have the + /// same size in the 0th dimension. + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. If the queue is full, this operation will block until the given + /// elements have been enqueued (or 'timeout_ms' elapses, if specified). + /// + public static Operation queue_enqueue_many (Tensor handle, Tensor[] components, int? 
timeout_ms = null, string name = "QueueEnqueueMany") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["components"] = components; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueEnqueueMany", name: name, keywords: dict); + return op; + } + + /// + /// Enqueues zero or more tuples of one or more tensors in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// One or more tensors from which the enqueued tensors should + /// be taken. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueManyV2'. + /// + /// + /// If the queue is too full, this operation will block for up + /// to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation slices each component tensor along the 0th dimension to + /// make multiple queue elements. All of the tuple components must have the + /// same size in the 0th dimension. + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. If the queue is full, this operation will block until the given + /// elements have been enqueued (or 'timeout_ms' elapses, if specified). + /// + public static Operation queue_enqueue_many_v2 (Tensor handle, Tensor[] components, int? timeout_ms = null, string name = "QueueEnqueueManyV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["components"] = components; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueEnqueueManyV2", name: name, keywords: dict); + return op; + } + + /// + /// Enqueues a tuple of one or more tensors in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// One or more tensors from which the enqueued tensors should be taken. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueV2'. + /// + /// + /// If the queue is full, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// + /// Returns the description of the operation + /// + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. If the queue is full, this operation will block until the given + /// element has been enqueued (or 'timeout_ms' elapses, if specified). + /// + public static Operation queue_enqueue_v2 (Tensor handle, Tensor[] components, int? timeout_ms = null, string name = "QueueEnqueueV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["components"] = components; + if (timeout_ms.HasValue) + dict["timeout_ms"] = timeout_ms.Value; + var op = _op_def_lib._apply_op_helper("QueueEnqueueV2", name: name, keywords: dict); + return op; + } + + /// + /// Returns true if queue is closed. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueIsClosed'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns true if the queue is closed and false if the queue + /// is open. 
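+ ///
+ /// A one-line illustrative use (queueHandle assumed to be defined elsewhere):
+ /// var closed = gen_ops.queue_is_closed(queueHandle);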
+ /// + public static Tensor queue_is_closed (Tensor handle, string name = "QueueIsClosed") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("QueueIsClosed", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns true if queue is closed. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueIsClosedV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns true if the queue is closed and false if the queue + /// is open. + /// + public static Tensor queue_is_closed_v2 (Tensor handle, string name = "QueueIsClosedV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("QueueIsClosedV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the number of elements in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueSize'. + /// + /// + /// The number of elements in the given queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor queue_size (Tensor handle, string name = "QueueSize") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("QueueSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the number of elements in the given queue. + /// + /// + /// The handle to a queue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueSizeV2'. + /// + /// + /// The number of elements in the given queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor queue_size_v2 (Tensor handle, string name = "QueueSizeV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("QueueSizeV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Real-valued fast Fourier transform. + /// + /// + /// A float32 tensor. + /// + /// + /// An int32 tensor of shape [1]. The FFT length. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT'. + /// + /// + /// A complex64 tensor of the same rank as input. The inner-most + /// dimension of input is replaced with the fft_length / 2 + 1 unique + /// frequency components of its 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.rfft + /// @end_compatibility + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the 1-dimensional discrete Fourier transform of a real-valued signal + /// over the inner-most dimension of input. + /// + /// Since the DFT of a real signal is Hermitian-symmetric, RFFT only returns the + /// fft_length / 2 + 1 unique components of the FFT: the zero-frequency term, + /// followed by the fft_length / 2 positive-frequency terms. + /// + /// Along the axis RFFT is computed on, if fft_length is smaller than the + /// corresponding dimension of input, the dimension is cropped. 
If it is larger,
+ /// the dimension is padded with zeros.
+ ///
+ public static Tensor r_f_f_t (Tensor input, Tensor fft_length, string name = "RFFT")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["fft_length"] = fft_length;
+ var op = _op_def_lib._apply_op_helper("RFFT", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// 2D real-valued fast Fourier transform.
+ ///
+ ///
+ /// A float32 tensor.
+ ///
+ ///
+ /// An int32 tensor of shape [2]. The FFT length for each dimension.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT2D'.
+ ///
+ ///
+ /// A complex64 tensor of the same rank as input. The inner-most 2
+ /// dimensions of input are replaced with their 2D Fourier transform. The
+ /// inner-most dimension contains fft_length / 2 + 1 unique frequency
+ /// components.
+ ///
+ /// @compatibility(numpy)
+ /// Equivalent to np.fft.rfft2
+ /// @end_compatibility
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
+ /// over the inner-most 2 dimensions of input.
+ ///
+ /// Since the DFT of a real signal is Hermitian-symmetric, RFFT2D only returns the
+ /// fft_length / 2 + 1 unique components of the FFT for the inner-most dimension
+ /// of output: the zero-frequency term, followed by the fft_length / 2
+ /// positive-frequency terms.
+ ///
+ /// Along each axis RFFT2D is computed on, if fft_length is smaller than the
+ /// corresponding dimension of input, the dimension is cropped. If it is larger,
+ /// the dimension is padded with zeros.
+ ///
+ public static Tensor r_f_f_t2d (Tensor input, Tensor fft_length, string name = "RFFT2D")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["fft_length"] = fft_length;
+ var op = _op_def_lib._apply_op_helper("RFFT2D", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// 3D real-valued fast Fourier transform.
+ ///
+ ///
+ /// A float32 tensor.
+ ///
+ ///
+ /// An int32 tensor of shape [3]. The FFT length for each dimension.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT3D'.
+ ///
+ ///
+ /// A complex64 tensor of the same rank as input. The inner-most 3
+ /// dimensions of input are replaced with their 3D Fourier transform. The
+ /// inner-most dimension contains fft_length / 2 + 1 unique frequency
+ /// components.
+ ///
+ /// @compatibility(numpy)
+ /// Equivalent to np.fft.rfftn with 3 dimensions.
+ /// @end_compatibility
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
+ /// over the inner-most 3 dimensions of input.
+ ///
+ /// Since the DFT of a real signal is Hermitian-symmetric, RFFT3D only returns the
+ /// fft_length / 2 + 1 unique components of the FFT for the inner-most dimension
+ /// of output: the zero-frequency term, followed by the fft_length / 2
+ /// positive-frequency terms.
+ ///
+ /// Along each axis RFFT3D is computed on, if fft_length is smaller than the
+ /// corresponding dimension of input, the dimension is cropped. If it is larger,
+ /// the dimension is padded with zeros.
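+ ///
+ /// For instance, a real input of shape [8, 8, 8] transformed with
+ /// fft_length = [8, 8, 8] produces a complex64 output of shape [8, 8, 5],
+ /// since only fft_length / 2 + 1 = 5 unique components remain along the
+ /// inner-most axis.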
+ ///
+ public static Tensor r_f_f_t3d (Tensor input, Tensor fft_length, string name = "RFFT3D")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["fft_length"] = fft_length;
+ var op = _op_def_lib._apply_op_helper("RFFT3D", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Converts one or more images from RGB to HSV.
+ ///
+ ///
+ /// 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RGBToHSV'.
+ ///
+ ///
+ /// images converted to HSV.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Outputs a tensor of the same shape as the images tensor, containing the HSV
+ /// value of the pixels. The output is only well defined if the values in images
+ /// are in [0, 1].
+ ///
+ /// output[..., 0] contains hue, output[..., 1] contains saturation, and
+ /// output[..., 2] contains value. All HSV values are in [0, 1]. A hue of 0
+ /// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
+ ///
+ public static Tensor r_g_b_to_h_s_v (Tensor images, string name = "RGBToHSV")
+ {
+ var dict = new Dictionary();
+ dict["images"] = images;
+ var op = _op_def_lib._apply_op_helper("RGBToHSV", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Randomly crop image.
+ ///
+ ///
+ /// 3-D of shape [height, width, channels].
+ ///
+ ///
+ /// 1-D of length 2 containing: crop_height, crop_width.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomCrop'.
+ ///
+ ///
+ /// If either seed or seed2 are set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ ///
+ ///
+ /// A second seed to avoid seed collision.
+ ///
+ ///
+ /// 3-D of shape [crop_height, crop_width, channels].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// size is a 1-D int64 tensor with 2 elements representing the crop height and
+ /// width. The values must be non-negative.
+ ///
+ /// This Op picks a random location in image and crops a height by width
+ /// rectangle from that location. The random location is picked so the cropped
+ /// area will fit inside the original image.
+ ///
+ public static Tensor random_crop (Tensor image, Tensor size, int? seed = null, int? seed2 = null, string name = "RandomCrop")
+ {
+ var dict = new Dictionary();
+ dict["image"] = image;
+ dict["size"] = size;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("RandomCrop", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates a Dataset that returns pseudorandom numbers.
+ ///
+ ///
+ /// A scalar seed for the random number generator. If either seed or
+ /// seed2 is set to be non-zero, the random number generator is seeded
+ /// by the given seed. Otherwise, a random seed is used.
+ ///
+ ///
+ /// A second scalar seed to avoid seed collision.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomDataset'.
+ /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor random_dataset (Tensor seed, Tensor seed2, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RandomDataset") + { + var dict = new Dictionary(); + dict["seed"] = seed; + dict["seed2"] = seed2; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("RandomDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random values from the Gamma distribution(s) described by alpha. + /// + /// + /// 1-D integer tensor. Shape of independent samples to draw from each + /// distribution described by the shape parameters given in alpha. + /// + /// + /// A tensor in which each scalar is a "shape" parameter describing the + /// associated gamma distribution. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomGamma'. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A tensor with shape shape + shape(alpha). Each slice + /// [:, ..., :, i0, i1, ...iN] contains the samples drawn for + /// alpha[i0, i1, ...iN]. The dtype of the output matches the dtype of alpha. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op uses the algorithm by Marsaglia et al. to acquire samples via + /// transformation-rejection from pairs of uniform and normal random variables. + /// See http://dl.acm.org/citation.cfm?id=358414 + /// + public static Tensor random_gamma (Tensor shape, Tensor alpha, int? seed = null, int? seed2 = null, string name = "RandomGamma") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["alpha"] = alpha; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomGamma", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the derivative of a Gamma random sample w.r.t. alpha. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomGammaGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor random_gamma_grad (Tensor alpha, Tensor sample, string name = "RandomGammaGrad") + { + var dict = new Dictionary(); + dict["alpha"] = alpha; + dict["sample"] = sample; + var op = _op_def_lib._apply_op_helper("RandomGammaGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Use RandomPoissonV2 instead. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomPoisson'. + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor random_poisson (Tensor shape, Tensor rate, int? seed = null, int? 
seed2 = null, string name = "RandomPoisson") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["rate"] = rate; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomPoisson", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random values from the Poisson distribution(s) described by rate. + /// + /// + /// 1-D integer tensor. Shape of independent samples to draw from each + /// distribution described by the shape parameters given in rate. + /// + /// + /// A tensor in which each scalar is a "rate" parameter describing the + /// associated poisson distribution. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomPoissonV2'. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// + /// + /// A tensor with shape shape + shape(rate). Each slice + /// [:, ..., :, i0, i1, ...iN] contains the samples drawn for + /// rate[i0, i1, ...iN]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op uses two algorithms, depending on rate. If rate >= 10, then + /// the algorithm by Hormann is used to acquire samples via + /// transformation-rejection. + /// See http://www.sciencedirect.com/science/article/pii/0167668793909974. + /// + /// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + /// random variables. + /// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + /// Programming, Volume 2. Addison Wesley + /// + public static Tensor random_poisson_v2 (Tensor shape, Tensor rate, int? seed = null, int? seed2 = null, TF_DataType? dtype = null, string name = "RandomPoissonV2") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["rate"] = rate; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (dtype.HasValue) + dict["dtype"] = dtype.Value; + var op = _op_def_lib._apply_op_helper("RandomPoissonV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Randomly shuffles a tensor along its first dimension. + /// + /// + /// The tensor to be shuffled. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffle'. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A tensor of same shape and type as value, shuffled along its first + /// dimension. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The tensor is shuffled along dimension 0, such that each value[j] is mapped + /// to one and only one output[i]. For example, a mapping that might occur for a + /// 3x2 tensor is: + /// + /// + /// [[1, 2], [[5, 6], + /// [3, 4], ==> [1, 2], + /// [5, 6]] [3, 4]] + /// + /// + public static Tensor random_shuffle (Tensor value, int? seed = null, int? 
seed2 = null, string name = "RandomShuffle") + { + var dict = new Dictionary(); + dict["value"] = value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomShuffle", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that randomizes the order of elements. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffleQueue'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// Dequeue will block unless there would be this + /// many elements after the dequeue or the queue is closed. This + /// ensures a minimum level of mixing of elements. + /// + /// + /// If either seed or seed2 is set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, a random seed is used. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor random_shuffle_queue (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, int? min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueue") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (min_after_dequeue.HasValue) + dict["min_after_dequeue"] = min_after_dequeue.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("RandomShuffleQueue", name: name, keywords: dict); + return op.output; + } + + /// + /// A queue that randomizes the order of elements. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffleQueueV2'. + /// + /// + /// Optional argument + /// The type of each component in a value. + /// + /// + /// The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// + /// + /// The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// + /// + /// Dequeue will block unless there would be this + /// many elements after the dequeue or the queue is closed. 
This + /// ensures a minimum level of mixing of elements. + /// + /// + /// If either seed or seed2 is set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, a random seed is used. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the queue. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor random_shuffle_queue_v2 (TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, int? min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueueV2") + { + var dict = new Dictionary(); + dict["component_types"] = component_types; + if (shapes != null) + dict["shapes"] = shapes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (min_after_dequeue.HasValue) + dict["min_after_dequeue"] = min_after_dequeue.Value; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("RandomShuffleQueueV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random values from a normal distribution. + /// + /// + /// The shape of the output tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomStandardNormal'. + /// + /// + /// Optional argument + /// The type of the output. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A tensor of the specified shape filled with random normal values. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + public static Tensor random_standard_normal (Tensor shape, TF_DataType dtype, int? seed = null, int? seed2 = null, string name = "RandomStandardNormal") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["dtype"] = dtype; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomStandardNormal", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random values from a uniform distribution. + /// + /// + /// The shape of the output tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniform'. + /// + /// + /// Optional argument + /// The type of the output. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A tensor of the specified shape filled with uniform random values. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The generated values follow a uniform distribution in the range [0, 1). The + /// lower bound 0 is included in the range, while the upper bound 1 is excluded. + /// + public static Tensor random_uniform (Tensor shape, TF_DataType dtype, int? seed = null, int? seed2 = null, string name = "RandomUniform") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["dtype"] = dtype; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomUniform", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs random integers from a uniform distribution. + /// + /// + /// The shape of the output tensor. + /// + /// + /// 0-D. Inclusive lower bound on the generated integers. + /// + /// + /// 0-D. Exclusive upper bound on the generated integers. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniformInt'. + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// A tensor of the specified shape filled with uniform random integers. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The generated values are uniform integers in the range [minval, maxval). + /// The lower bound minval is included in the range, while the upper bound + /// maxval is excluded. + /// + /// The random integers are slightly biased unless maxval - minval is an exact + /// power of two. The bias is small for values of maxval - minval significantly + /// smaller than the range of the output (either 2^32 or 2^64). + /// + public static Tensor random_uniform_int (Tensor shape, Tensor minval, Tensor maxval, int? seed = null, int? seed2 = null, string name = "RandomUniformInt") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["minval"] = minval; + dict["maxval"] = maxval; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + var op = _op_def_lib._apply_op_helper("RandomUniformInt", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a sequence of numbers. + /// + /// + /// 0-D (scalar). First entry in the sequence. + /// + /// + /// 0-D (scalar). Upper limit of sequence, exclusive. + /// + /// + /// 0-D (scalar). Optional. Default is 1. Number that increments start. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Range'. + /// + /// + /// 1-D. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation creates a sequence of numbers that begins at start and + /// extends by increments of delta up to but not including limit. 
+ /// + /// For example: + /// + /// + /// # 'start' is 3 + /// # 'limit' is 18 + /// # 'delta' is 3 + /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + /// + /// + public static Tensor range (Tensor start, Tensor limit, Tensor delta, string name = "Range") + { + var dict = new Dictionary(); + dict["start"] = start; + dict["limit"] = limit; + dict["delta"] = delta; + var op = _op_def_lib._apply_op_helper("Range", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset with a range of values. Corresponds to python's xrange. + /// + /// + /// corresponds to start in python's xrange(). + /// + /// + /// corresponds to stop in python's xrange(). + /// + /// + /// corresponds to step in python's xrange(). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RangeDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor range_dataset (Tensor start, Tensor stop, Tensor step, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RangeDataset") + { + var dict = new Dictionary(); + dict["start"] = start; + dict["stop"] = stop; + dict["step"] = step; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("RangeDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the rank of a tensor. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rank'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns an integer representing the rank of input. + /// + /// For example: + /// + /// + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// # shape of tensor 't' is [2, 2, 3] + /// rank(t) ==> 3 + /// + /// + /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank + /// of a tensor is the number of indices required to uniquely select each element + /// of the tensor. Rank is also known as "order", "degree", or "ndims." + /// + public static Tensor rank (Tensor input, string name = "Rank") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("Rank", name: name, keywords: dict); + return op.output; + } + + /// + /// Reads and outputs the entire contents of the input filename. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReadFile'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor read_file (Tensor filename, string name = "ReadFile") + { + var dict = new Dictionary(); + dict["filename"] = filename; + var op = _op_def_lib._apply_op_helper("ReadFile", name: name, keywords: dict); + return op.output; + } + + /// + /// Reads the value of a variable. + /// + /// + /// handle to the resource in which to store the variable. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReadVariableOp'. + /// + /// + /// Optional argument + /// the dtype of the value. 
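+ /// + /// A hedged usage sketch (the handle and variable names here are illustrative assumptions, e.g. a resource handle produced elsewhere by a VarHandleOp): + /// + /// // 'varHandle' is a resource tensor created elsewhere + /// var value = gen_ops.read_variable_op(varHandle, TF_DataType.TF_FLOAT);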
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The tensor returned by this operation is immutable. + /// + /// The value returned by this operation is guaranteed to be influenced by all the + /// writes on which this operation depends directly or indirectly, and to not be + /// influenced by any of the writes which depend directly or indirectly on this + /// operation. + /// + public static Tensor read_variable_op (Tensor resource, TF_DataType dtype, string name = "ReadVariableOp") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("ReadVariableOp", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the number of records this Reader has produced. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumRecordsProduced'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is the same as the number of ReaderRead executions that have + /// succeeded. + /// + public static Tensor reader_num_records_produced (Tensor reader_handle, string name = "ReaderNumRecordsProduced") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderNumRecordsProduced", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the number of records this Reader has produced. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumRecordsProducedV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is the same as the number of ReaderRead executions that have + /// succeeded. + /// + public static Tensor reader_num_records_produced_v2 (Tensor reader_handle, string name = "ReaderNumRecordsProducedV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderNumRecordsProducedV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the number of work units this Reader has finished processing. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumWorkUnitsCompleted'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor reader_num_work_units_completed (Tensor reader_handle, string name = "ReaderNumWorkUnitsCompleted") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompleted", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the number of work units this Reader has finished processing. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumWorkUnitsCompletedV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
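+ /// + /// A minimal usage sketch, assuming 'readerHandle' was produced by one of the reader ops (the variable name is illustrative, not part of this API): + /// + /// var completed = gen_ops.reader_num_work_units_completed_v2(readerHandle);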
+ /// + public static Tensor reader_num_work_units_completed_v2 (Tensor reader_handle, string name = "ReaderNumWorkUnitsCompletedV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the next record (key, value pair) produced by a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// Handle to a Queue, with string work items. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRead'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// key : A scalar. + /// value : A scalar. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// + public static (Tensor key, Tensor value) reader_read (Tensor reader_handle, Tensor queue_handle, string name = "ReaderRead") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["queue_handle"] = queue_handle; + var op = _op_def_lib._apply_op_helper("ReaderRead", name: name, keywords: dict); + int _idx = 0; + var key = op.outputs[_idx++]; + var value = op.outputs[_idx++]; + return (key, value); + } + + /// + /// Returns up to num_records (key, value) pairs produced by a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// Handle to a Queue, with string work items. + /// + /// + /// number of records to read from Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadUpTo'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// keys : A 1-D tensor. + /// values : A 1-D tensor. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// It may return fewer than num_records even before the last batch. + /// + public static (Tensor keys, Tensor values) reader_read_up_to (Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name = "ReaderReadUpTo") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["queue_handle"] = queue_handle; + dict["num_records"] = num_records; + var op = _op_def_lib._apply_op_helper("ReaderReadUpTo", name: name, keywords: dict); + int _idx = 0; + var keys = op.outputs[_idx++]; + var values = op.outputs[_idx++]; + return (keys, values); + } + + /// + /// Returns up to num_records (key, value) pairs produced by a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// Handle to a Queue, with string work items. + /// + /// + /// number of records to read from Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadUpToV2'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// keys : A 1-D tensor. + /// values : A 1-D tensor. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. 
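+ /// + /// A hedged sketch of consuming the returned tuple (the handle and count names are assumptions): + /// + /// var (keys, values) = gen_ops.reader_read_up_to_v2(readerHandle, queueHandle, numRecords); + /// // the underlying Operation can be fetched from either keys or values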
+ /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// It may return fewer than num_records even before the last batch. + /// + public static (Tensor keys, Tensor values) reader_read_up_to_v2 (Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name = "ReaderReadUpToV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["queue_handle"] = queue_handle; + dict["num_records"] = num_records; + var op = _op_def_lib._apply_op_helper("ReaderReadUpToV2", name: name, keywords: dict); + int _idx = 0; + var keys = op.outputs[_idx++]; + var values = op.outputs[_idx++]; + return (keys, values); + } + + /// + /// Returns the next record (key, value pair) produced by a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// Handle to a Queue, with string work items. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadV2'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// key : A scalar. + /// value : A scalar. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// + public static (Tensor key, Tensor value) reader_read_v2 (Tensor reader_handle, Tensor queue_handle, string name = "ReaderReadV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["queue_handle"] = queue_handle; + var op = _op_def_lib._apply_op_helper("ReaderReadV2", name: name, keywords: dict); + int _idx = 0; + var key = op.outputs[_idx++]; + var value = op.outputs[_idx++]; + return (key, value); + } + + /// + /// Restore a Reader to its initial clean state. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReset'. + /// + /// + /// Returns the description of the operation + /// + public static Operation reader_reset (Tensor reader_handle, string name = "ReaderReset") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderReset", name: name, keywords: dict); + return op; + } + + /// + /// Restore a Reader to its initial clean state. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderResetV2'. + /// + /// + /// Returns the description of the operation + /// + public static Operation reader_reset_v2 (Tensor reader_handle, string name = "ReaderResetV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderResetV2", name: name, keywords: dict); + return op; + } + + /// + /// Restore a reader to a previously saved state. + /// + /// + /// Handle to a Reader. + /// + /// + /// Result of a ReaderSerializeState of a Reader with type + /// matching reader_handle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRestoreState'. 
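+ /// + /// A hedged sketch pairing this op with reader_serialize_state (the 'readerHandle' name is an assumption): + /// + /// var state = gen_ops.reader_serialize_state(readerHandle); + /// gen_ops.reader_restore_state(readerHandle, state);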
+ /// + /// + /// Returns the description of the operation + /// + /// + /// Not all Readers support being restored, so this can produce an + /// Unimplemented error. + /// + public static Operation reader_restore_state (Tensor reader_handle, Tensor state, string name = "ReaderRestoreState") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["state"] = state; + var op = _op_def_lib._apply_op_helper("ReaderRestoreState", name: name, keywords: dict); + return op; + } + + /// + /// Restore a reader to a previously saved state. + /// + /// + /// Handle to a Reader. + /// + /// + /// Result of a ReaderSerializeState of a Reader with type + /// matching reader_handle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRestoreStateV2'. + /// + /// + /// Returns the description of the operation + /// + /// + /// Not all Readers support being restored, so this can produce an + /// Unimplemented error. + /// + public static Operation reader_restore_state_v2 (Tensor reader_handle, Tensor state, string name = "ReaderRestoreStateV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + dict["state"] = state; + var op = _op_def_lib._apply_op_helper("ReaderRestoreStateV2", name: name, keywords: dict); + return op; + } + + /// + /// Produce a string tensor that encodes the state of a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderSerializeState'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Not all Readers support being serialized, so this can produce an + /// Unimplemented error. + /// + public static Tensor reader_serialize_state (Tensor reader_handle, string name = "ReaderSerializeState") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderSerializeState", name: name, keywords: dict); + return op.output; + } + + /// + /// Produce a string tensor that encodes the state of a Reader. + /// + /// + /// Handle to a Reader. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderSerializeStateV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Not all Readers support being serialized, so this can produce an + /// Unimplemented error. + /// + public static Tensor reader_serialize_state_v2 (Tensor reader_handle, string name = "ReaderSerializeStateV2") + { + var dict = new Dictionary(); + dict["reader_handle"] = reader_handle; + var op = _op_def_lib._apply_op_helper("ReaderSerializeStateV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the real part of a complex number. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Real'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor input of complex numbers, this operation returns a tensor of + /// type float that is the real part of each element in input. 
All elements in + /// input must be complex numbers of the form \\(a + bj\\), where *a* is the real + /// part returned by this operation and *b* is the imaginary part. + /// + /// For example: + /// + /// + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.real(input) ==> [-2.25, 3.25] + /// + /// + public static Tensor real (Tensor input, TF_DataType? Tout = null, string name = "Real") + { + var dict = new Dictionary(); + dict["input"] = input; + if (Tout.HasValue) + dict["Tout"] = Tout.Value; + var op = _op_def_lib._apply_op_helper("Real", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x / y element-wise for real types. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RealDiv'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If x and y are reals, this will return the floating-point division. + /// + /// *NOTE*: RealDiv supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor real_div (Tensor x, Tensor y, string name = "RealDiv") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("RealDiv", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Reciprocal'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = 1 / x\\). + /// + public static Tensor reciprocal (Tensor x, string name = "Reciprocal") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Reciprocal", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient for the inverse of x wrt its input. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReciprocalGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = -dy * y*y, where y = 1/x, and dy + /// is the corresponding input gradient. + /// + public static Tensor reciprocal_grad (Tensor y, Tensor dy, string name = "ReciprocalGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("ReciprocalGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Emits randomized records. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RecordInput'. + /// + /// + /// Optional argument + /// Glob pattern for the data files. + /// + /// + /// Random seeds used to produce randomized records. + /// + /// + /// Shifts the list of files after the list is randomly + /// shuffled. + /// + /// + /// The randomization shuffling buffer. + /// + /// + /// How many sstables are opened and concurrently iterated over. + /// + /// + /// The batch size. + /// + /// + /// The type of compression for the file. Currently ZLIB and + /// GZIP are supported. Defaults to none. + /// + /// + /// A tensor of shape [batch_size]. 
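+ /// + /// A hedged configuration sketch (the glob pattern and sizes below are assumptions, not defaults): + /// + /// var records = gen_ops.record_input("data/part-*", file_buffer_size: 10000, batch_size: 32);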
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor record_input (string file_pattern, int? file_random_seed = null, float? file_shuffle_shift_ratio = null, int? file_buffer_size = null, int? file_parallelism = null, int? batch_size = null, string compression_type = null, string name = "RecordInput") + { + var dict = new Dictionary(); + dict["file_pattern"] = file_pattern; + if (file_random_seed.HasValue) + dict["file_random_seed"] = file_random_seed.Value; + if (file_shuffle_shift_ratio.HasValue) + dict["file_shuffle_shift_ratio"] = file_shuffle_shift_ratio.Value; + if (file_buffer_size.HasValue) + dict["file_buffer_size"] = file_buffer_size.Value; + if (file_parallelism.HasValue) + dict["file_parallelism"] = file_parallelism.Value; + if (batch_size.HasValue) + dict["batch_size"] = batch_size.Value; + if (compression_type != null) + dict["compression_type"] = compression_type; + var op = _op_def_lib._apply_op_helper("RecordInput", name: name, keywords: dict); + return op.output; + } + + /// + /// Joins a string Tensor across the given dimensions. + /// + /// + /// The input to be joined. All reduced indices must have non-zero size. + /// + /// + /// The dimensions to reduce over. Dimensions are reduced in the + /// order specified. Omitting reduction_indices is equivalent to passing + /// [n-1, n-2, ..., 0]. Negative indices from -n to -1 are supported. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReduceJoin'. + /// + /// + /// If True, retain reduced dimensions with length 1. + /// + /// + /// The separator to use when joining. + /// + /// + /// Has shape equal to that of the input with reduced dimensions removed or + /// set to 1 depending on keep_dims. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Computes the string join across dimensions in the given string Tensor of shape + /// [\\(d_0, d_1, ..., d_{n-1}\\)]. Returns a new Tensor created by joining the input + /// strings with the given separator (default: empty string). Negative indices are + /// counted backwards from the end, with -1 being equivalent to n - 1. If + /// indices are not specified, joins across all dimensions beginning from n - 1 + /// through 0. + /// + /// For example: + /// + /// + /// # tensor a is [["a", "b"], ["c", "d"]] + /// tf.reduce_join(a, 0) ==> ["ac", "bd"] + /// tf.reduce_join(a, 1) ==> ["ab", "cd"] + /// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + /// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + /// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + /// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + /// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + /// tf.reduce_join(a, [0, 1]) ==> "acbd" + /// tf.reduce_join(a, [1, 0]) ==> "abcd" + /// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + /// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + /// + /// + public static Tensor reduce_join (Tensor inputs, Tensor reduction_indices, bool? 
keep_dims = null, string separator = null, string name = "ReduceJoin") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + dict["reduction_indices"] = reduction_indices; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + if (separator != null) + dict["separator"] = separator; + var op = _op_def_lib._apply_op_helper("ReduceJoin", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates or finds a child frame, and makes data available to the child frame. + /// + /// + /// The tensor to be made available to the child frame. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefEnter'. + /// + /// + /// Optional argument + /// The name of the child frame. + /// + /// + /// If true, the output is constant within the child frame. + /// + /// + /// The number of iterations allowed to run in parallel. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The unique frame_name is used by the Executor to identify frames. If + /// is_constant is true, output is a constant in the child frame; otherwise + /// it may be changed in the child frame. At most parallel_iterations iterations + /// are run in parallel in the child frame. + /// + public static Tensor ref_enter (Tensor data, string frame_name, bool? is_constant = null, int? parallel_iterations = null, string name = "RefEnter") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["frame_name"] = frame_name; + if (is_constant.HasValue) + dict["is_constant"] = is_constant.Value; + if (parallel_iterations.HasValue) + dict["parallel_iterations"] = parallel_iterations.Value; + var op = _op_def_lib._apply_op_helper("RefEnter", name: name, keywords: dict); + return op.output; + } + + /// + /// Exits the current frame to its parent frame. + /// + /// + /// The tensor to be made available to the parent frame. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefExit'. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Exit makes its input data available to the parent frame. + /// + public static Tensor ref_exit (Tensor data, string name = "RefExit") + { + var dict = new Dictionary(); + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("RefExit", name: name, keywords: dict); + return op.output; + } + + /// + /// Return the same ref tensor as the input ref tensor. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefIdentity'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ref_identity (Tensor input, string name = "RefIdentity") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("RefIdentity", name: name, keywords: dict); + return op.output; + } + + /// + /// Forwards the value of an available tensor from inputs to output. + /// + /// + /// The input tensors, exactly one of which will become available. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefMerge'. 
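+ /// + /// A hedged sketch (branchA and branchB are assumed to be the two ref outputs of an upstream Switch): + /// + /// var (merged, index) = gen_ops.ref_merge(new[] { branchA, branchB });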
+ /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : Will be set to the available input tensor. + /// value_index : The index of the chosen input tensor in inputs. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Merge waits for at least one of the tensors in inputs to become available. + /// It is usually combined with Switch to implement branching. + /// + /// Merge forwards the first tensor to become available to output, and sets + /// value_index to its index in inputs. + /// + public static (Tensor output, Tensor value_index) ref_merge (Tensor[] inputs, string name = "RefMerge") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + var op = _op_def_lib._apply_op_helper("RefMerge", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var value_index = op.outputs[_idx++]; + return (output, value_index); + } + + /// + /// Makes its input available to the next iteration. + /// + /// + /// The tensor to be made available to the next iteration. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefNextIteration'. + /// + /// + /// The same tensor as data. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ref_next_iteration (Tensor data, string name = "RefNextIteration") + { + var dict = new Dictionary(); + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("RefNextIteration", name: name, keywords: dict); + return op.output; + } + + /// + /// Forwards the index-th element of inputs to output. + /// + /// + /// A scalar that determines the input that gets selected. + /// + /// + /// A list of ref tensors, one of which will be forwarded to output. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefSelect'. + /// + /// + /// The forwarded tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor ref_select (Tensor index, Tensor[] inputs, string name = "RefSelect") + { + var dict = new Dictionary(); + dict["index"] = index; + dict["inputs"] = inputs; + var op = _op_def_lib._apply_op_helper("RefSelect", name: name, keywords: dict); + return op.output; + } + + /// + /// Forwards the ref tensor data to the output port determined by pred. + /// + /// + /// The ref tensor to be forwarded to the appropriate output. + /// + /// + /// A scalar that specifies which output port will receive data. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RefSwitch'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output_false : If pred is false, data will be forwarded to this output. + /// output_true : If pred is true, data will be forwarded to this output. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// If pred is true, the data input is forwarded to output_true. Otherwise, + /// the data goes to output_false. + /// + /// See also Switch and Merge. 
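+ /// + /// A hedged branching sketch (data and pred are assumed ref/boolean tensors; names are illustrative): + /// + /// var (whenFalse, whenTrue) = gen_ops.ref_switch(data, pred); + /// // exactly one of the two outputs becomes available at runtime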
+ /// + public static (Tensor output_false, Tensor output_true) ref_switch (Tensor data, Tensor pred, string name = "RefSwitch") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["pred"] = pred; + var op = _op_def_lib._apply_op_helper("RefSwitch", name: name, keywords: dict); + int _idx = 0; + var output_false = op.outputs[_idx++]; + var output_true = op.outputs[_idx++]; + return (output_false, output_true); + } + + /// + /// Check if the input matches the regex pattern. + /// + /// + /// A string tensor of the text to be processed. + /// + /// + /// A scalar string tensor containing the regular expression to match the input. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RegexFullMatch'. + /// + /// + /// A bool tensor with the same shape as input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input is a string tensor of any shape. The pattern is a scalar + /// string tensor which is applied to every element of the input tensor. + /// The boolean values (True or False) of the output tensor indicate + /// if the input matches the regex pattern provided. + /// + /// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + public static Tensor regex_full_match (Tensor input, Tensor pattern, string name = "RegexFullMatch") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["pattern"] = pattern; + var op = _op_def_lib._apply_op_helper("RegexFullMatch", name: name, keywords: dict); + return op.output; + } + + /// + /// Replaces the match of pattern in input with rewrite. + /// + /// + /// The text to be processed. + /// + /// + /// The regular expression to match the input. + /// + /// + /// The rewrite to be applied to the matched expression. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RegexReplace'. + /// + /// + /// If True, the replacement is global, otherwise the replacement + /// is done only on the first match. + /// + /// + /// The text after applying pattern and rewrite. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + public static Tensor regex_replace (Tensor input, Tensor pattern, Tensor rewrite, bool? replace_global = null, string name = "RegexReplace") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["pattern"] = pattern; + dict["rewrite"] = rewrite; + if (replace_global.HasValue) + dict["replace_global"] = replace_global.Value; + var op = _op_def_lib._apply_op_helper("RegexReplace", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes rectified linear: max(features, 0). + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Relu'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor relu (Tensor features, string name = "Relu") + { + var dict = new Dictionary(); + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Relu", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes rectified linear 6: min(max(features, 0), 6). 
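+ /// For example (a worked sketch): applied element-wise to [-2.0, 3.0, 9.0] this yields [0.0, 3.0, 6.0].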
+ /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Relu6'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor relu6 (Tensor features, string name = "Relu6") + { + var dict = new Dictionary(); + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Relu6", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes rectified linear 6 gradients for a Relu6 operation. + /// + /// + /// The backpropagated gradients to the corresponding Relu6 operation. + /// + /// + /// The features passed as input to the corresponding Relu6 operation, or + /// its output; using either one produces the same result. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Relu6Grad'. + /// + /// + /// The gradients: + /// gradients * (features > 0) * (features < 6). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor relu6grad (Tensor gradients, Tensor features, string name = "Relu6Grad") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Relu6Grad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes rectified linear gradients for a Relu operation. + /// + /// + /// The backpropagated gradients to the corresponding Relu operation. + /// + /// + /// The features passed as input to the corresponding Relu operation, OR + /// the outputs of that operation (both work equivalently). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReluGrad'. + /// + /// + /// gradients * (features > 0). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor relu_grad (Tensor gradients, Tensor features, string name = "ReluGrad") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("ReluGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Execute a sub graph on a remote processor. + /// + /// + /// Arbitrary number of tensors with arbitrary data types + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RemoteFusedGraphExecute'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// Serialized protocol buffer + /// of RemoteFusedGraphExecuteInfo which contains graph specifications. + /// + /// + /// Arbitrary number of tensors with arbitrary data types + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The graph specifications(such as graph itself, input tensors and output names) + /// are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + /// as serialized_remote_fused_graph_execute_info. + /// The specifications will be passed to a dedicated registered + /// remote fused graph executor. The executor will send the graph specifications + /// to a remote processor and execute that graph. The execution results + /// will be passed to consumer nodes as outputs of this node. 
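+ /// + /// A hedged sketch; 'serializedInfo' must be a real serialized RemoteFusedGraphExecuteInfo protocol buffer and, like the input names, is only a placeholder here: + /// + /// var outs = gen_ops.remote_fused_graph_execute(new[] { input0, input1 }, new[] { TF_DataType.TF_FLOAT }, serializedInfo);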
+ /// + public static Tensor[] remote_fused_graph_execute (Tensor[] inputs, TF_DataType[] Toutputs, string serialized_remote_fused_graph_execute_info, string name = "RemoteFusedGraphExecute") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + dict["Toutputs"] = Toutputs; + dict["serialized_remote_fused_graph_execute_info"] = serialized_remote_fused_graph_execute_info; + var op = _op_def_lib._apply_op_helper("RemoteFusedGraphExecute", name: name, keywords: dict); + int _idx = 0; + var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray(); + return (outputs); + } + + /// + /// Creates a dataset that emits the outputs of input_dataset count times. + /// + /// + /// + /// + /// A scalar representing the number of times that input_dataset should + /// be repeated. A value of -1 indicates that it should be repeated infinitely. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RepeatDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor repeat_dataset (Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RepeatDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["count"] = count; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("RepeatDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Given a quantized tensor described by (input, input_min, input_max), outputs a + /// + /// + /// + /// + /// The float value that the minimum quantized input value represents. + /// + /// + /// The float value that the maximum quantized input value represents. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RequantizationRange'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output_min : The computed min output. + /// output_max : The computed max output. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// range that covers the actual values present in that tensor. This op is + /// typically used to produce the requested_output_min and requested_output_max for + /// Requantize. + /// + public static (Tensor output_min, Tensor output_max) requantization_range (Tensor input, Tensor input_min, Tensor input_max, string name = "RequantizationRange") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_min"] = input_min; + dict["input_max"] = input_max; + var op = _op_def_lib._apply_op_helper("RequantizationRange", name: name, keywords: dict); + int _idx = 0; + var output_min = op.outputs[_idx++]; + var output_max = op.outputs[_idx++]; + return (output_min, output_max); + } + + /// + /// Convert the quantized 'input' tensor into a lower-precision 'output', using the + /// + /// + /// + /// + /// The float value that the minimum quantized input value represents. + /// + /// + /// The float value that the maximum quantized input value represents. + /// + /// + /// The float value that the minimum quantized output value represents. + /// + /// + /// The float value that the maximum quantized output value represents. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Requantize'. + /// + /// + /// Optional argument + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output : + /// output_min : The requested_output_min value is copied into this output. + /// output_max : The requested_output_max value is copied into this output. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// output range specified with 'requested_output_min' and 'requested_output_max'. + /// + /// [input_min, input_max] are scalar floats that specify the range for the float + /// interpretation of the 'input' data. For example, if input_min is -1.0f and + /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + public static (Tensor output, Tensor output_min, Tensor output_max) requantize (Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string name = "Requantize") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["input_min"] = input_min; + dict["input_max"] = input_max; + dict["requested_output_min"] = requested_output_min; + dict["requested_output_max"] = requested_output_max; + dict["out_type"] = out_type; + var op = _op_def_lib._apply_op_helper("Requantize", name: name, keywords: dict); + int _idx = 0; + var output = op.outputs[_idx++]; + var output_min = op.outputs[_idx++]; + var output_max = op.outputs[_idx++]; + return (output, output_min, output_max); + } + + /// + /// Reshapes a tensor. + /// + /// + /// + /// + /// Defines the shape of the output tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Reshape'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given tensor, this operation returns a tensor that has the same values + /// as tensor with shape shape. + /// + /// If one component of shape is the special value -1, the size of that dimension + /// is computed so that the total size remains constant. In particular, a shape + /// of [-1] flattens into 1-D. At most one component of shape can be -1. + /// + /// If shape is 1-D or higher, then the operation returns a tensor with shape + /// shape filled with the values of tensor. In this case, the number of elements + /// implied by shape must be the same as the number of elements in tensor. 
+ /// + /// For example: + /// + /// + /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + /// # tensor 't' has shape [9] + /// reshape(t, [3, 3]) ==> [[1, 2, 3], + /// [4, 5, 6], + /// [7, 8, 9]] + /// + /// # tensor 't' is [[[1, 1], [2, 2]], + /// # [[3, 3], [4, 4]]] + /// # tensor 't' has shape [2, 2, 2] + /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + /// [3, 3, 4, 4]] + /// + /// # tensor 't' is [[[1, 1, 1], + /// # [2, 2, 2]], + /// # [[3, 3, 3], + /// # [4, 4, 4]], + /// # [[5, 5, 5], + /// # [6, 6, 6]]] + /// # tensor 't' has shape [3, 2, 3] + /// # pass '[-1]' to flatten 't' + /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + /// + /// # -1 can also be used to infer the shape + /// + /// # -1 is inferred to be 9: + /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 2: + /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 3: + /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + /// [2, 2, 2], + /// [3, 3, 3]], + /// [[4, 4, 4], + /// [5, 5, 5], + /// [6, 6, 6]]] + /// + /// # tensor 't' is [7] + /// # shape [] reshapes to a scalar + /// reshape(t, []) ==> 7 + /// + /// + public static Tensor reshape (Tensor tensor, Tensor shape, string name = "Reshape") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + dict["shape"] = shape; + var op = _op_def_lib._apply_op_helper("Reshape", name: name, keywords: dict); + return op.output; + } + + /// + /// Resize images to size using area interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// = A 1-D int32 Tensor of 2 elements: new_height, new_width. The + /// new size for the images. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeArea'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// 4-D with shape + /// [batch, new_height, new_width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Input images can be of different types but output images are always float. + /// + /// The range of pixel values for the output image might be slightly different + /// from the range for the input image because of limited numerical precision. + /// To guarantee an output range, for example [0.0, 1.0], apply + /// tf.clip_by_value to the output. + /// + /// Each output pixel is computed by first transforming the pixel's footprint into + /// the input tensor and then averaging the pixels that intersect the footprint. An + /// input pixel's contribution to the average is weighted by the fraction of its + /// area that intersects the footprint. This is the same as OpenCV's INTER_AREA. + /// + public static Tensor resize_area (Tensor images, Tensor size, bool? align_corners = null, string name = "ResizeArea") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["size"] = size; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeArea", name: name, keywords: dict); + return op.output; + } + + /// + /// Resize images to size using bicubic interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. 
+ /// + /// + /// = A 1-D int32 Tensor of 2 elements: new_height, new_width. The + /// new size for the images. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeBicubic'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// 4-D with shape + /// [batch, new_height, new_width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Input images can be of different types but output images are always float. + /// + public static Tensor resize_bicubic (Tensor images, Tensor size, bool? align_corners = null, string name = "ResizeBicubic") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["size"] = size; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeBicubic", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of bicubic interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// 4-D with shape [batch, orig_height, orig_width, channels], + /// The image tensor that was resized. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeBicubicGrad'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// + /// 4-D with shape [batch, orig_height, orig_width, channels]. + /// Gradients with respect to the input image. Input image must have been + /// float or double. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor resize_bicubic_grad (Tensor grads, Tensor original_image, bool? align_corners = null, string name = "ResizeBicubicGrad") + { + var dict = new Dictionary(); + dict["grads"] = grads; + dict["original_image"] = original_image; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeBicubicGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Resize images to size using bilinear interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// = A 1-D int32 Tensor of 2 elements: new_height, new_width. The + /// new size for the images. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeBilinear'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// 4-D with shape + /// [batch, new_height, new_width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Input images can be of different types but output images are always float. + /// + public static Tensor resize_bilinear (Tensor images, Tensor size, bool? 
align_corners = null, string name = "ResizeBilinear") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["size"] = size; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeBilinear", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of bilinear interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// 4-D with shape [batch, orig_height, orig_width, channels], + /// The image tensor that was resized. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeBilinearGrad'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// + /// 4-D with shape [batch, orig_height, orig_width, channels]. + /// Gradients with respect to the input image. Input image must have been + /// float or double. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor resize_bilinear_grad (Tensor grads, Tensor original_image, bool? align_corners = null, string name = "ResizeBilinearGrad") + { + var dict = new Dictionary(); + dict["grads"] = grads; + dict["original_image"] = original_image; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeBilinearGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Resize images to size using nearest neighbor interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// = A 1-D int32 Tensor of 2 elements: new_height, new_width. The + /// new size for the images. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeNearestNeighbor'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// 4-D with shape + /// [batch, new_height, new_width, channels]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor resize_nearest_neighbor (Tensor images, Tensor size, bool? align_corners = null, string name = "ResizeNearestNeighbor") + { + var dict = new Dictionary(); + dict["images"] = images; + dict["size"] = size; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeNearestNeighbor", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of nearest neighbor interpolation. + /// + /// + /// 4-D with shape [batch, height, width, channels]. + /// + /// + /// = A 1-D int32 Tensor of 2 elements: orig_height, orig_width. The + /// original input size. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResizeNearestNeighborGrad'. + /// + /// + /// If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// + /// 4-D with shape [batch, orig_height, orig_width, channels]. Gradients + /// with respect to the input image. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
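+ /// 
+ /// A minimal usage sketch (hypothetical names; the incoming grads tensor and the
+ /// original spatial size are assumed to exist already):
+ /// 
+ /// // recover gradients at the original resolution of the image batch
+ /// var orig_size = tf.constant(new int[] { 32, 32 });
+ /// var input_grads = gen_ops.resize_nearest_neighbor_grad(grads, orig_size);
+ /// 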
+ /// + public static Tensor resize_nearest_neighbor_grad (Tensor grads, Tensor size, bool? align_corners = null, string name = "ResizeNearestNeighborGrad") + { + var dict = new Dictionary(); + dict["grads"] = grads; + dict["size"] = size; + if (align_corners.HasValue) + dict["align_corners"] = align_corners.Value; + var op = _op_def_lib._apply_op_helper("ResizeNearestNeighborGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the AdaMax algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAdaMax'. + /// + /// + /// If True, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// v_t <- max(beta2 * v_{t-1}, abs(g)) + /// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + /// + public static Operation resource_apply_ada_max (Tensor var, Tensor m, Tensor v, Tensor beta1_power, Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ResourceApplyAdaMax") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["v"] = v; + dict["beta1_power"] = beta1_power; + dict["lr"] = lr; + dict["beta1"] = beta1; + dict["beta2"] = beta2; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAdaMax", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the adadelta scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay factor. Must be a scalar. + /// + /// + /// Constant factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAdadelta'. + /// + /// + /// If True, updating of the var, accum and update_accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// accum = rho() * accum + (1 - rho()) * grad.square(); + /// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + /// update_accum = rho() * update_accum + (1 - rho()) * update.square(); + /// var -= update; + /// + public static Operation resource_apply_adadelta (Tensor var, Tensor accum, Tensor accum_update, Tensor lr, Tensor rho, Tensor epsilon, Tensor grad, bool? 
use_locking = null, string name = "ResourceApplyAdadelta") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["accum_update"] = accum_update; + dict["lr"] = lr; + dict["rho"] = rho; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAdadelta", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// + /// + /// Returns the description of the operation + /// + /// + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + public static Operation resource_apply_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor grad, bool? use_locking = null, bool? update_slots = null, string name = "ResourceApplyAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (update_slots.HasValue) + dict["update_slots"] = update_slots.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAdagrad", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the proximal adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Training step number. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAdagradDA'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + public static Operation resource_apply_adagrad_d_a (Tensor var, Tensor gradient_accumulator, Tensor gradient_squared_accumulator, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor global_step, bool? use_locking = null, string name = "ResourceApplyAdagradDA") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["gradient_accumulator"] = gradient_accumulator; + dict["gradient_squared_accumulator"] = gradient_squared_accumulator; + dict["grad"] = grad; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["global_step"] = global_step; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAdagradDA", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the Adam algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Must be a scalar. 
+ /// + /// + /// Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Momentum factor. Must be a scalar. + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAdam'. + /// + /// + /// If True, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, uses the nesterov update. + /// + /// + /// Returns the description of the operation + /// + /// + /// $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ + /// $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ + /// $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ + /// $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + /// + public static Operation resource_apply_adam (Tensor var, Tensor m, Tensor v, Tensor beta1_power, Tensor beta2_power, Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, bool? use_locking = null, bool? use_nesterov = null, string name = "ResourceApplyAdam") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["v"] = v; + dict["beta1_power"] = beta1_power; + dict["beta2_power"] = beta2_power; + dict["lr"] = lr; + dict["beta1"] = beta1; + dict["beta2"] = beta2; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAdam", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the AddSign update. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyAddSign'. + /// + /// + /// If True, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- (alpha + sign_decay * sign(g) *sign(m)) * g + /// variable <- variable - lr_t * update + /// + public static Operation resource_apply_add_sign (Tensor var, Tensor m, Tensor lr, Tensor alpha, Tensor sign_decay, Tensor beta, Tensor grad, bool? use_locking = null, string name = "ResourceApplyAddSign") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["lr"] = lr; + dict["alpha"] = alpha; + dict["sign_decay"] = sign_decay; + dict["beta"] = beta; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyAddSign", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. 
Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// Momentum. Must be a scalar. + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyCenteredRMSProp'. + /// + /// + /// If True, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. + /// + /// Note that in the dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in the sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// mg <- rho * mg_{t-1} + (1-rho) * grad + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + /// var <- var - mom + /// + public static Operation resource_apply_centered_r_m_s_prop (Tensor var, Tensor mg, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ResourceApplyCenteredRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["mg"] = mg; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyCenteredRMSProp", name: name, keywords: dict); + return op; + }
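+ 
+ // A minimal, hypothetical usage sketch for the op above. The resource handles
+ // (var, mg, ms, mom) and the scalar tensors (lr, rho, momentum, epsilon) plus the
+ // gradient tensor are assumed to have been created elsewhere:
+ //   gen_ops.resource_apply_centered_r_m_s_prop(var, mg, ms, mom,
+ //       lr, rho, momentum, epsilon, grad, use_locking: true);
+ 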
+ /// + /// Update '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyFtrl'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// accum_new = accum + grad * grad + /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + public static Operation resource_apply_ftrl (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor lr_power, bool? use_locking = null, string name = "ResourceApplyFtrl") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyFtrl", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 shrinkage regularization. Must be a scalar. + /// + /// + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyFtrlV2'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var + /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + /// linear += grad_with_shrinkage - + /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + public static Operation resource_apply_ftrl_v2 (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor lr, Tensor l1, Tensor l2, Tensor l2_shrinkage, Tensor lr_power, bool? use_locking = null, string name = "ResourceApplyFtrlV2") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["l2_shrinkage"] = l2_shrinkage; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyFtrlV2", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' by subtracting 'alpha' * 'delta' from it. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The change. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + public static Operation resource_apply_gradient_descent (Tensor var, Tensor alpha, Tensor delta, bool? use_locking = null, string name = "ResourceApplyGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["delta"] = delta; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name: name, keywords: dict); + return op; + }
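+ 
+ // Sketch (hypothetical names): one plain SGD step, var <- var - alpha * delta,
+ // assuming a variable handle, a scalar learning-rate tensor and a gradient exist:
+ //   var step = gen_ops.resource_apply_gradient_descent(var_handle,
+ //       tf.constant(0.01f), grad);
+ // The returned Operation is then run by the session like any other op.
+ 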
+ /// + /// Update '*var' according to the momentum scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// Momentum. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyMomentum'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + /// + /// + /// Returns the description of the operation + /// + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. + /// + /// accum = accum * momentum + grad + /// var -= lr * accum + /// + public static Operation resource_apply_momentum (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor momentum, bool? use_locking = null, bool? use_nesterov = null, string name = "ResourceApplyMomentum") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["momentum"] = momentum; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyMomentum", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the PowerSign update. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyPowerSign'. + /// + /// + /// If True, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + /// variable <- variable - lr_t * update + /// + public static Operation resource_apply_power_sign (Tensor var, Tensor m, Tensor lr, Tensor logbase, Tensor sign_decay, Tensor beta, Tensor grad, bool? use_locking = null, string name = "ResourceApplyPowerSign") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["m"] = m; + dict["lr"] = lr; + dict["logbase"] = logbase; + dict["sign_decay"] = sign_decay; + dict["beta"] = beta; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyPowerSign", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyProximalAdagrad'.
+ /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// accum += grad * grad + /// prox_v = var - lr * grad * (1 / sqrt(accum)) + /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + /// + public static Operation resource_apply_proximal_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor l1, Tensor l2, Tensor grad, bool? use_locking = null, string name = "ResourceApplyProximalAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyProximalAdagrad", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The change. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyProximalGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// prox_v = var - alpha * delta + /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + /// + public static Operation resource_apply_proximal_gradient_descent (Tensor var, Tensor alpha, Tensor l1, Tensor l2, Tensor delta, bool? use_locking = null, string name = "ResourceApplyProximalGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["l1"] = l1; + dict["l2"] = l2; + dict["delta"] = delta; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyProximalGradientDescent", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceApplyRMSProp'. + /// + /// + /// If True, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. 
+ /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + public static Operation resource_apply_r_m_s_prop (Tensor var, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, bool? use_locking = null, string name = "ResourceApplyRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceApplyRMSProp", name: name, keywords: dict); + return op; + } + + /// + /// Increments variable pointed to by 'resource' until it reaches 'limit'. + /// + /// + /// Should be from a scalar Variable node. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceCountUpTo'. + /// + /// + /// Optional argument + /// If incrementing ref would bring it above limit, instead generates an + /// 'OutOfRange' error. + /// + /// + /// Optional argument + /// + /// + /// A copy of the input before increment. If nothing else modifies the + /// input, the values produced will all be distinct. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor resource_count_up_to (Tensor resource, int limit, TF_DataType T, string name = "ResourceCountUpTo") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["limit"] = limit; + dict["T"] = T; + var op = _op_def_lib._apply_op_helper("ResourceCountUpTo", name: name, keywords: dict); + return op.output; + } + + /// + /// Gather slices from the variable pointed to by resource according to indices. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceGather'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// indices must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape indices.shape + params.shape[1:] where: + /// + /// + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + /// + /// + public static Tensor resource_gather (Tensor resource, Tensor indices, TF_DataType dtype, bool? validate_indices = null, string name = "ResourceGather") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["dtype"] = dtype; + if (validate_indices.HasValue) + dict["validate_indices"] = validate_indices.Value; + var op = _op_def_lib._apply_op_helper("ResourceGather", name: name, keywords: dict); + return op.output; + } + + /// + /// Adds sparse updates to the variable referenced by resource. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. 
+ /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterAdd'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] += updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] += updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions add. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_add (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterAdd") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterAdd", name: name, keywords: dict); + return op; + } + + /// + /// Divides sparse updates into the variable referenced by resource. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterDiv'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] /= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] /= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions multiply. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_div (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterDiv") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterDiv", name: name, keywords: dict); + return op; + } + + /// + /// Reduces sparse updates into the variable referenced by resource using the max operation. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterMax'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] 
= max(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions are combined. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_max (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterMax") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterMax", name: name, keywords: dict); + return op; + } + + /// + /// Reduces sparse updates into the variable referenced by resource using the min operation. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterMin'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = min(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions are combined. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_min (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterMin") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterMin", name: name, keywords: dict); + return op; + } + + /// + /// Multiplies sparse updates into the variable referenced by resource. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterMul'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] *= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] *= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] 
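+ /// 
+ /// For example (illustrative values only): if the variable holds [1, 2, 3, 4],
+ /// indices = [0, 2] and updates = [10, 10], the variable afterwards holds
+ /// [10, 2, 30, 4].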
+ /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions multiply. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_mul (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterMul") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterMul", name: name, keywords: dict); + return op; + } + + /// + /// Adds sparse updates to individual values or slices within a given variable according to indices. + /// + /// + /// A resource handle. Must be from a VarHandleOp. + /// + /// + /// A Tensor. Must be one of the following types: int32, int64. + /// A tensor of indices into ref. + /// + /// + /// A Tensor. Must have the same type as ref. A tensor of + /// values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterNdAdd'. + /// + /// + /// An optional bool. Defaults to True. If True, the assignment will + /// be protected by a lock; otherwise the behavior is undefined, + /// but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// ref is a Tensor with rank P and indices is a Tensor of rank Q. + /// + /// indices must be an integer tensor, containing indices into ref. + /// It must be of shape [d_0, ..., d_{Q-2}, K] where 0 < K <= P. + /// + /// The innermost dimension of indices (with length K) corresponds to + /// indices into elements (if K = P) or slices (if K < P) along the Kth + /// dimension of ref. + /// + /// updates is a Tensor of rank Q-1+P-K with shape: + /// + /// + /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + /// + /// + /// For example, say we want to add 4 scattered elements to a rank-1 tensor with + /// 8 elements. In Python, that update would look like this: + /// + /// + /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// update = tf.scatter_nd_add(ref, indices, updates) + /// with tf.Session() as sess: + /// print(sess.run(update)) + /// + /// + /// The resulting update to ref would look like this: + /// + /// [1, 13, 3, 14, 14, 6, 7, 20] + /// + /// See tf.scatter_nd for more details about how to make updates to + /// slices. + /// + public static Operation resource_scatter_nd_add (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ResourceScatterNdAdd") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceScatterNdAdd", name: name, keywords: dict); + return op; + }
+ + /// + /// Applies sparse updates to individual values or slices within a given variable according to indices. + /// + /// + /// A resource handle. Must be from a VarHandleOp. + /// + /// + /// A Tensor. Must be one of the following types: int32, int64. + /// A tensor of indices into ref. + /// + /// + /// A Tensor. Must have the same type as ref. A tensor of updated + /// values to assign to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterNdUpdate'. + /// + /// + /// An optional bool. Defaults to True. If True, the assignment will + /// be protected by a lock; otherwise the behavior is undefined, + /// but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// ref is a Tensor with rank P and indices is a Tensor of rank Q. + /// + /// indices must be an integer tensor, containing indices into ref. + /// It must be of shape [d_0, ..., d_{Q-2}, K] where 0 < K <= P. + /// + /// The innermost dimension of indices (with length K) corresponds to + /// indices into elements (if K = P) or slices (if K < P) along the Kth + /// dimension of ref. + /// + /// updates is a Tensor of rank Q-1+P-K with shape: + /// + /// + /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + /// + /// + /// For example, say we want to update 4 scattered elements of a rank-1 tensor with + /// 8 elements. In Python, that update would look like this: + /// + /// + /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// update = tf.scatter_nd_update(ref, indices, updates) + /// with tf.Session() as sess: + /// print(sess.run(update)) + /// + /// + /// The resulting update to ref would look like this: + /// + /// [1, 11, 3, 10, 9, 6, 7, 12] + /// + /// See tf.scatter_nd for more details about how to make updates to + /// slices. + /// + public static Operation resource_scatter_nd_update (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ResourceScatterNdUpdate") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceScatterNdUpdate", name: name, keywords: dict); + return op; + }
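+ 
+ // A hypothetical C# counterpart of the Python snippet above (tf.constant and a
+ // resource-variable handle are assumed to be available in this binding):
+ //   var indices = tf.constant(new int[,] { { 4 }, { 3 }, { 1 }, { 7 } });
+ //   var updates = tf.constant(new int[] { 9, 10, 11, 12 });
+ //   var update_op = gen_ops.resource_scatter_nd_update(var_handle, indices, updates);
+ 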
+ /// + /// Subtracts sparse updates from the variable referenced by resource. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to subtract from ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterSub'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] -= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] -= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions add. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> + /// </div> + /// + public static Operation resource_scatter_sub (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterSub") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterSub", name: name, keywords: dict); + return op; + } + + /// + /// Assigns sparse updates to the variable referenced by resource. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to store in ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterUpdate'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + /// + public static Operation resource_scatter_update (Tensor resource, Tensor indices, Tensor updates, string name = "ResourceScatterUpdate") + { + var dict = new Dictionary(); + dict["resource"] = resource; + dict["indices"] = indices; + dict["updates"] = updates; + var op = _op_def_lib._apply_op_helper("ResourceScatterUpdate", name: name, keywords: dict); + return op; + } + + /// + /// Update relevant entries in '*var' and '*accum' according to the adadelta scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// Decay factor. Must be a scalar. + /// + /// + /// Constant factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdadelta'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + public static Operation resource_sparse_apply_adadelta (Tensor var, Tensor accum, Tensor accum_update, Tensor lr, Tensor rho, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "ResourceSparseApplyAdadelta") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["accum_update"] = accum_update; + dict["lr"] = lr; + dict["rho"] = rho; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyAdadelta", name: name, keywords: dict); + return op; + } + + /// + /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum.
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// + /// + /// Returns the description of the operation + /// + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + public static Operation resource_sparse_apply_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor indices, bool? use_locking = null, bool? update_slots = null, string name = "ResourceSparseApplyAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (update_slots.HasValue) + dict["update_slots"] = update_slots.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyAdagrad", name: name, keywords: dict); + return op; + } + + /// + /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Training step number. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdagradDA'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + public static Operation resource_sparse_apply_adagrad_d_a (Tensor var, Tensor gradient_accumulator, Tensor gradient_squared_accumulator, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor global_step, bool? use_locking = null, string name = "ResourceSparseApplyAdagradDA") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["gradient_accumulator"] = gradient_accumulator; + dict["gradient_squared_accumulator"] = gradient_squared_accumulator; + dict["grad"] = grad; + dict["indices"] = indices; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["global_step"] = global_step; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyAdagradDA", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var, ms and mom. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyCenteredRMSProp'. + /// + /// + /// If True, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. + /// + /// Note that in the dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in this sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// mg <- rho * mg_{t-1} + (1-rho) * grad + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + /// var <- var - mom + /// + public static Operation resource_sparse_apply_centered_r_m_s_prop (Tensor var, Tensor mg, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "ResourceSparseApplyCenteredRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["mg"] = mg; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyCenteredRMSProp", name: name, keywords: dict); + return op; + }
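+ 
+ // Sketch (assumed tensors): one sparse centered-RMSProp step restricted to the
+ // rows selected by indices:
+ //   gen_ops.resource_sparse_apply_centered_r_m_s_prop(var, mg, ms, mom,
+ //       lr, rho, momentum, epsilon, grad, indices);
+ 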
+ /// + /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyFtrl'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// That is for rows we have grad for, we update var, accum and linear as follows: + /// accum_new = accum + grad * grad + /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + public static Operation resource_sparse_apply_ftrl (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor lr_power, bool? use_locking = null, string name = "ResourceSparseApplyFtrl") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["indices"] = indices; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyFtrl", name: name, keywords: dict); + return op; + } + + /// + /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 shrinkage regularization. Must be a scalar. + /// + /// + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyFtrlV2'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// That is for rows we have grad for, we update var, accum and linear as follows: + /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var + /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + /// linear += grad_with_shrinkage - + /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + public static Operation resource_sparse_apply_ftrl_v2 (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor l2_shrinkage, Tensor lr_power, bool? use_locking = null, string name = "ResourceSparseApplyFtrlV2") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["indices"] = indices; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["l2_shrinkage"] = l2_shrinkage; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyFtrlV2", name: name, keywords: dict); + return op; + } + + /// + /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar.
+ /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Momentum. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyMomentum'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + /// + /// + /// Returns the description of the operation + /// + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// + /// accum = accum * momentum + grad + /// var -= lr * accum + /// + public static Operation resource_sparse_apply_momentum (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor indices, Tensor momentum, bool? use_locking = null, bool? use_nesterov = null, string name = "ResourceSparseApplyMomentum") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["indices"] = indices; + dict["momentum"] = momentum; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyMomentum", name: name, keywords: dict); + return op; + } + + /// + /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyProximalAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// accum += grad * grad + /// prox_v = var + /// prox_v -= lr * grad * (1 / sqrt(accum)) + /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + /// + public static Operation resource_sparse_apply_proximal_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor l1, Tensor l2, Tensor grad, Tensor indices, bool? use_locking = null, string name = "ResourceSparseApplyProximalAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyProximalAdagrad", name: name, keywords: dict); + return op; + } + + /// + /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. 
Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyProximalGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// That is for rows we have grad for, we update var as follows: + /// prox_v = var - alpha * grad + /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + /// + public static Operation resource_sparse_apply_proximal_gradient_descent (Tensor var, Tensor alpha, Tensor l1, Tensor l2, Tensor grad, Tensor indices, bool? use_locking = null, string name = "ResourceSparseApplyProximalGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyProximalGradientDescent", name: name, keywords: dict); + return op; + } + + /// + /// Update '*var' according to the RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var, ms and mom. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyRMSProp'. + /// + /// + /// If True, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Returns the description of the operation + /// + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + public static Operation resource_sparse_apply_r_m_s_prop (Tensor var, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "ResourceSparseApplyRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ResourceSparseApplyRMSProp", name: name, keywords: dict); + return op; + } + + /// + /// Assign value to the sliced l-value reference of ref. 
+ /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceStridedSliceAssign'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns the description of the operation + /// + /// + /// The values of value are assigned to the positions in the variable + /// ref that are selected by the slice parameters. The slice parameters + /// begin, end, strides, etc. work exactly as in StridedSlice. + /// + /// NOTE this op currently does not support broadcasting and so value's + /// shape must be exactly the shape produced by the slice of ref. + /// + public static Operation resource_strided_slice_assign (Tensor reference, Tensor begin, Tensor end, Tensor strides, Tensor value, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "ResourceStridedSliceAssign") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["begin"] = begin; + dict["end"] = end; + dict["strides"] = strides; + dict["value"] = value; + if (begin_mask.HasValue) + dict["begin_mask"] = begin_mask.Value; + if (end_mask.HasValue) + dict["end_mask"] = end_mask.Value; + if (ellipsis_mask.HasValue) + dict["ellipsis_mask"] = ellipsis_mask.Value; + if (new_axis_mask.HasValue) + dict["new_axis_mask"] = new_axis_mask.Value; + if (shrink_axis_mask.HasValue) + dict["shrink_axis_mask"] = shrink_axis_mask.Value; + var op = _op_def_lib._apply_op_helper("ResourceStridedSliceAssign", name: name, keywords: dict); + return op; + } +
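To make the assignment semantics concrete, here is a minimal calling sketch (an editorial illustration, not generated code: it assumes a tf.constant-style helper for building the index tensors, assumes `handle` is the variable's resource tensor, and omits graph/session plumbing):

    // Hypothetical: overwrite rows 1..3 (exclusive) of a 2-column variable with zeros.
    var assignOp = gen_ops.resource_strided_slice_assign(
        handle,                                          // resource tensor of the variable (assumed)
        begin: tf.constant(new[] { 1 }),
        end: tf.constant(new[] { 3 }),
        strides: tf.constant(new[] { 1 }),
        value: tf.constant(new float[,] { { 0f, 0f }, { 0f, 0f } }));
    // No broadcasting: value's shape must equal the slice's shape exactly.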
+ /// + /// Restores a tensor from checkpoint files. + /// + /// + /// Must have a single element. The pattern of the files from + /// which we read the tensor. + /// + /// + /// Must have a single element. The name of the tensor to be + /// restored. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Restore'. + /// + /// + /// Optional argument + /// The type of the tensor to be restored. + /// + /// + /// Index of file to open first if multiple files match + /// file_pattern. + /// + /// + /// The restored tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Reads a tensor stored in one or several files. If there are several files (for + /// instance because a tensor was saved as slices), file_pattern may contain + /// wildcard symbols (* and ?) in the filename portion only, not in the + /// directory portion. + /// + /// If a file_pattern matches several files, preferred_shard can be used to hint + /// in which file the requested tensor is likely to be found. This op will first + /// open the file at index preferred_shard in the list of matching files and try + /// to restore tensors from that file. Only if some tensors or tensor slices are + /// not found in that first file, then the Op opens all the files. Setting + /// preferred_shard to match the value passed as the shard input + /// of a matching Save Op may speed up Restore. This attribute only affects + /// performance, not correctness. The default value -1 means files are processed in + /// order. + /// + /// See also RestoreSlice. + /// + public static Tensor restore (Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int? preferred_shard = null, string name = "Restore") + { + var dict = new Dictionary(); + dict["file_pattern"] = file_pattern; + dict["tensor_name"] = tensor_name; + dict["dt"] = dt; + if (preferred_shard.HasValue) + dict["preferred_shard"] = preferred_shard.Value; + var op = _op_def_lib._apply_op_helper("Restore", name: name, keywords: dict); + return op.output; + } + + /// + /// Restores a tensor from checkpoint files. + /// + /// + /// Must have a single element. The pattern of the files from + /// which we read the tensor. + /// + /// + /// Must have a single element. The name of the tensor to be + /// restored. + /// + /// + /// Scalar. The shapes and slice specifications to use when + /// restoring a tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RestoreSlice'. + /// + /// + /// Optional argument + /// The type of the tensor to be restored. + /// + /// + /// Index of file to open first if multiple files match + /// file_pattern. See the documentation for Restore. + /// + /// + /// The restored tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is like Restore except that the restored tensor can be listed as filling + /// only a slice of a larger tensor. shape_and_slice specifies the shape of the + /// larger tensor and the slice that the restored tensor covers. + /// + /// The shape_and_slice input has the same format as the + /// elements of the shapes_and_slices input of the SaveSlices op. + /// + public static Tensor restore_slice (Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int? preferred_shard = null, string name = "RestoreSlice") + { + var dict = new Dictionary(); + dict["file_pattern"] = file_pattern; + dict["tensor_name"] = tensor_name; + dict["shape_and_slice"] = shape_and_slice; + dict["dt"] = dt; + if (preferred_shard.HasValue) + dict["preferred_shard"] = preferred_shard.Value; + var op = _op_def_lib._apply_op_helper("RestoreSlice", name: name, keywords: dict); + return op.output; + } + + /// + /// Restores tensors from a V2 checkpoint. + /// + /// + /// Must have a single element. The prefix of a V2 checkpoint. + /// + /// + /// shape {N}. The names of the tensors to be restored. + /// + /// + /// shape {N}. The slice specs of the tensors to be restored. + /// Empty strings indicate that they are non-partitioned tensors. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RestoreV2'. + /// + /// + /// Optional argument + /// shape {N}. The list of expected dtypes for the tensors. Must match + /// those stored in the checkpoint. + /// + /// + /// shape {N}. The restored tensors, whose shapes are read from the + /// checkpoint directly. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// For backward compatibility with the V1 format, this Op currently allows + /// restoring from a V1 checkpoint as well: + /// - This Op first attempts to find the V2 index file pointed to by "prefix", and + /// if found, proceeds to read it as a V2 checkpoint; + /// - Otherwise the V1 read path is invoked. + /// Relying on this behavior is not recommended, as the ability to fall back to read + /// V1 might be deprecated and eventually removed. + /// + /// By default, restores the named tensors in full.
If the caller wishes to restore + /// specific slices of stored tensors, "shape_and_slices" should be non-empty + /// strings and correspondingly well-formed. + /// + /// Callers must ensure all the named tensors are indeed stored in the checkpoint. + /// + public static Tensor[] restore_v2 (Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name = "RestoreV2") + { + var dict = new Dictionary(); + dict["prefix"] = prefix; + dict["tensor_names"] = tensor_names; + dict["shape_and_slices"] = shape_and_slices; + dict["dtypes"] = dtypes; + var op = _op_def_lib._apply_op_helper("RestoreV2", name: name, keywords: dict); + int _idx = 0; + var tensors = Enumerable.Range(0, op.OutputListLength("tensors")).Select(_ => op.outputs[_idx++]).ToArray(); + return (tensors); + } + + /// + /// Reverses specific dimensions of a tensor. + /// + /// + /// Up to 8-D. + /// + /// + /// 1-D. The dimensions to reverse. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Reverse'. + /// + /// + /// The same shape as tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor, and a bool tensor dims representing the dimensions + /// of tensor, this operation reverses each dimension i of tensor where + /// dims[i] is True. + /// + /// tensor can have up to 8 dimensions. The number of dimensions + /// of tensor must equal the number of elements in dims. In other words: + /// + /// rank(tensor) = size(dims) + /// + /// For example: + /// + /// + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [False, False, False, True] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is [False, True, False, False] + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is [False, False, True, False] + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// + /// + public static Tensor reverse (Tensor tensor, Tensor dims, string name = "Reverse") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + dict["dims"] = dims; + var op = _op_def_lib._apply_op_helper("Reverse", name: name, keywords: dict); + return op.output; + } + + /// + /// Reverses variable length slices. + /// + /// + /// The input to reverse. + /// + /// + /// 1-D with length input.dims(batch_dim) and + /// max(seq_lengths) <= input.dims(seq_dim) + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReverseSequence'. + /// + /// + /// Optional argument + /// The dimension which is partially reversed. + /// + /// + /// The dimension along which reversal is performed. + /// + /// + /// The partially reversed input. It has the same shape as input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// + /// + /// This op first slices input along the dimension batch_dim, and for each + /// slice i, reverses the first seq_lengths[i] elements along + /// the dimension seq_dim. + /// + /// The elements of seq_lengths must obey seq_lengths[i] <= input.dims[seq_dim], + /// and seq_lengths must be a vector of length input.dims[batch_dim]. + /// + /// The output slice i along dimension batch_dim is then given by input + /// slice i, with the first seq_lengths[i] slices along dimension + /// seq_dim reversed. + /// + /// For example: + /// + /// + /// # Given this: + /// batch_dim = 0 + /// seq_dim = 1 + /// input.dims = (4, 8, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + /// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + /// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + /// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[0, 7:, :, ...] = input[0, 7:, :, ...] + /// output[1, 2:, :, ...] = input[1, 2:, :, ...] + /// output[2, 3:, :, ...] = input[2, 3:, :, ...] + /// output[3, 5:, :, ...] = input[3, 5:, :, ...] + /// + /// + /// In contrast, if: + /// + /// + /// # Given this: + /// batch_dim = 2 + /// seq_dim = 0 + /// input.dims = (8, ?, 4, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + /// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + /// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + /// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + /// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + /// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + /// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...] + /// + /// + public static Tensor reverse_sequence (Tensor input, Tensor seq_lengths, int seq_dim, int? batch_dim = null, string name = "ReverseSequence") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["seq_lengths"] = seq_lengths; + dict["seq_dim"] = seq_dim; + if (batch_dim.HasValue) + dict["batch_dim"] = batch_dim.Value; + var op = _op_def_lib._apply_op_helper("ReverseSequence", name: name, keywords: dict); + return op.output; + } +
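As a quick illustration, the first pseudo-example above might be driven through this binding as follows (editorial sketch: `input` is assumed to be a tensor of shape (4, 8, ...), tf.constant is assumed as a constant helper, and session execution is omitted):

    // Reverse the first seq_lengths[i] steps of each batch row i.
    var seqLengths = tf.constant(new long[] { 7, 2, 3, 5 });
    var reversed = gen_ops.reverse_sequence(input, seqLengths, seq_dim: 1, batch_dim: 0);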
+ /// + /// Reverses specific dimensions of a tensor. + /// + /// Up to 8-D. + /// + /// 1-D. The indices of the dimensions to reverse. Must be in the range + /// [-rank(tensor), rank(tensor)). + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReverseV2'. + /// + /// The same shape as tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// NOTE tf.reverse has now changed behavior in preparation for 1.0. + /// tf.reverse_v2 is currently an alias that will be deprecated before TF 1.0. + /// + /// Given a tensor, and an int32 tensor axis representing the set of + /// dimensions of tensor to reverse. This operation reverses each dimension + /// i for which there exists j s.t. axis[j] == i. + /// + /// tensor can have up to 8 dimensions. The number of dimensions specified + /// in axis may be 0 or more entries. If an index is specified more than + /// once, an InvalidArgument error is raised. + /// + /// For example: + /// + /// + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [3] or 'dims' is [-1] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is '[1]' (or 'dims' is '[-3]') + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is '[2]' (or 'dims' is '[-2]') + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// + /// + public static Tensor reverse_v2 (Tensor tensor, Tensor axis, string name = "ReverseV2") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + dict["axis"] = axis; + var op = _op_def_lib._apply_op_helper("ReverseV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Elementwise computes the bitwise right-shift of x and y. + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RightShift'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Performs a logical shift for unsigned integer types, and an arithmetic shift + /// for signed integer types. + /// + /// If y is negative, or greater than or equal to the width of x in bits, + /// the result is implementation defined. + /// + public static Tensor right_shift (Tensor x, Tensor y, string name = "RightShift") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("RightShift", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns element-wise integer closest to x. + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rint'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// If the result is midway between two representable values, + /// the even representable is chosen. + /// For example: + /// + /// + /// rint(-1.5) ==> -2.0 + /// rint(0.5000001) ==> 1.0 + /// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + /// + /// + public static Tensor rint (Tensor x, string name = "Rint") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Rint", name: name, keywords: dict); + return op.output; + } + + /// + /// Rolls the elements of a tensor along an axis. + /// + /// + /// + /// + /// Dimension must be 0-D or 1-D. shift[i] specifies the number of places by which + /// elements are shifted positively (towards larger indices) along the dimension + /// specified by axis[i]. Negative shifts will roll the elements in the opposite + /// direction. + /// + /// + /// Dimension must be 0-D or 1-D. axis[i] specifies the dimension along which the + /// shift shift[i] should occur. If the same axis is referenced more than once, the + /// total shift for that axis will be the sum of all the shifts that belong to that + /// axis. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Roll'. + /// + /// + /// Has the same shape and size as the input. The elements are shifted + /// positively (towards larger indices) by the offsets of shift along the + /// dimensions of axis. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The elements are shifted positively (towards larger indices) by the offset of + /// shift along the dimension of axis. Negative shift values will shift + /// elements in the opposite direction. Elements that roll past the last position + /// will wrap around to the first and vice versa. Multiple shifts along multiple + /// axes may be specified. + /// + /// For example: + /// + /// + /// # 't' is [0, 1, 2, 3, 4] + /// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] + /// + /// # shifting along multiple dimensions + /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + /// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] + /// + /// # shifting along the same axis multiple times + /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + /// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] + /// + /// + public static Tensor roll (Tensor input, Tensor shift, Tensor axis, string name = "Roll") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["shift"] = shift; + dict["axis"] = axis; + var op = _op_def_lib._apply_op_helper("Roll", name: name, keywords: dict); + return op.output; + } +
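The first roll example above, expressed through this binding (editorial sketch; tf.constant and session execution are assumed):

    var t = tf.constant(new[] { 0, 1, 2, 3, 4 });
    var rolled = gen_ops.roll(t, shift: tf.constant(2), axis: tf.constant(0));
    // Evaluating rolled should yield [3, 4, 0, 1, 2].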
+ /// + /// Rounds the values of a tensor to the nearest integer, element-wise. + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Rounds half to even. Also known as banker's rounding. If you want to round + /// according to the current system rounding mode, use std::rint. + /// + public static Tensor round (Tensor x, string name = "Round") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Round", name: name, keywords: dict); + return op.output; + } + + /// + /// Perform batches of RPC requests. + /// + /// + /// 0-D or 1-D. The address (i.e. host_name:port) of the RPC server. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with method and request. + /// + /// + /// 0-D or 1-D. The method address on the RPC server. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with address and request. + /// + /// + /// 0-D or 1-D. Serialized proto strings: the rpc request argument. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with address and method. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rpc'. + /// + /// + /// RPC protocol to use. Empty string means use the default protocol. + /// Options include 'grpc'. + /// + /// + /// boolean.
If true (default), then failures to connect + /// (i.e., the server does not immediately respond) cause an RPC failure. + /// + /// + /// int. If 0 (default), then the kernel will run the RPC + /// request and only time out if the RPC deadline passes or the session times out. + /// If this value is greater than 0, then the op will raise an exception if + /// the RPC takes longer than timeout_in_ms. + /// + /// + /// Same shape as request. Serialized proto strings: the rpc responses. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op asynchronously performs either a single RPC request, or a batch + /// of requests. RPC requests are defined by three main parameters: + /// + /// - address (the host+port or BNS address of the request) + /// - method (the RPC method name for the request) + /// - request (the serialized proto string, or vector of strings, + /// of the RPC request argument). + /// + /// For example, if you have an RPC service running on port localhost:2345, + /// and its interface is configured with the following proto declaration: + /// + /// + /// service MyService { + /// rpc MyMethod(MyRequestProto) returns (MyResponseProto) { + /// } + /// }; + /// + /// + /// then call this op with arguments: + /// + /// + /// address = "localhost:2345" + /// method = "MyService/MyMethod" + /// + /// + /// The request tensor is a string tensor representing serialized MyRequestProto + /// strings; and the output string tensor response will have the same shape + /// and contain (upon successful completion) corresponding serialized + /// MyResponseProto strings. + /// + /// For example, to send a single, empty, MyRequestProto, call + /// this op with request = "". To send 5 **parallel** empty requests, + /// call this op with request = ["", "", "", "", ""]. + /// + /// More generally, one can create a batch of MyRequestProto serialized protos + /// from regular batched tensors using the encode_proto op, and convert + /// the response MyResponseProto serialized protos to batched tensors + /// using the decode_proto op. + /// + /// **NOTE** Working with serialized proto strings is faster than instantiating + /// actual proto objects in memory, so no performance degradation is expected + /// compared to writing custom kernels for this workflow. + /// + /// If the connection fails or the remote worker returns an error + /// status, the op reraises this exception locally. + /// + /// See the TryRpc op if you prefer to handle RPC failures manually in the graph. + /// + public static Tensor rpc (Tensor address, Tensor method, Tensor request, string protocol = null, bool? fail_fast = null, int? timeout_in_ms = null, string name = "Rpc") + { + var dict = new Dictionary(); + dict["address"] = address; + dict["method"] = method; + dict["request"] = request; + if (protocol != null) + dict["protocol"] = protocol; + if (fail_fast.HasValue) + dict["fail_fast"] = fail_fast.Value; + if (timeout_in_ms.HasValue) + dict["timeout_in_ms"] = timeout_in_ms.Value; + var op = _op_def_lib._apply_op_helper("Rpc", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes reciprocal of square root of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rsqrt'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = 1 / \sqrt{x}\\). 
+ /// + public static Tensor rsqrt (Tensor x, string name = "Rsqrt") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Rsqrt", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient for the rsqrt of x wrt its input. + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'RsqrtGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = dy * -0.5 * y^3, where y = rsqrt(x), and dy + /// is the corresponding input gradient. + /// + public static Tensor rsqrt_grad (Tensor y, Tensor dy, string name = "RsqrtGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("RsqrtGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Generate a single randomly distorted bounding box for an image. + /// + /// + /// 1-D, containing [height, width, channels]. + /// + /// + /// 3-D with shape [batch, N, 4] describing the N bounding boxes + /// associated with the image. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SampleDistortedBoundingBox'. + /// + /// + /// If either seed or seed2 are set to non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a random + /// seed. + /// + /// + /// A second seed to avoid seed collision. + /// + /// + /// The cropped area of the image must contain at least this + /// fraction of any bounding box supplied. The value of this parameter should be + /// non-negative. In the case of 0, the cropped area does not need to overlap + /// any of the bounding boxes supplied. + /// + /// + /// The cropped area of the image must have an aspect ratio = + /// width / height within this range. + /// + /// + /// The cropped area of the image must contain a fraction of the + /// supplied image within this range. + /// + /// + /// Number of attempts at generating a cropped region of the image + /// of the specified constraints. After max_attempts failures, return the entire + /// image. + /// + /// + /// Controls behavior if no bounding boxes supplied. + /// If true, assume an implicit bounding box covering the whole input. If false, + /// raise an error. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// begin : 1-D, containing [offset_height, offset_width, 0]. Provide as input to + /// tf.slice. + /// size : 1-D, containing [target_height, target_width, -1]. Provide as input to + /// tf.slice. + /// bboxes : 3-D with shape [1, 1, 4] containing the distorted bounding box. + /// Provide as input to tf.image.draw_bounding_boxes. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Bounding box annotations are often supplied in addition to ground-truth labels + /// in image recognition or object localization tasks. A common technique for + /// training such a system is to randomly distort an image while preserving + /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted + /// localization of an object, i.e. bounding box, given an image_size, + /// bounding_boxes and a series of constraints. + /// + /// The output of this Op is a single bounding box that may be used to crop the + /// original image. The output is returned as 3 tensors: begin, size and + /// bboxes. The first 2 tensors can be fed directly into tf.slice to crop the + /// image. The latter may be supplied to tf.image.draw_bounding_boxes to visualize + /// what the bounding box looks like. + /// + /// Bounding boxes are supplied and returned as [y_min, x_min, y_max, x_max]. The + /// bounding box coordinates are floats in [0.0, 1.0] relative to the width and + /// height of the underlying image. + /// + /// For example, + /// + /// + /// # Generate a single distorted bounding box. + /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + /// tf.shape(image), + /// bounding_boxes=bounding_boxes) + /// + /// # Draw the bounding box in an image summary. + /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + /// bbox_for_draw) + /// tf.summary.image('images_with_box', image_with_box) + /// + /// # Employ the bounding box to distort the image. + /// distorted_image = tf.slice(image, begin, size) + /// + /// + /// Note that if no bounding box information is available, setting + /// use_image_if_no_bounding_boxes = true will assume there is a single implicit + /// bounding box covering the whole image. If use_image_if_no_bounding_boxes is + /// false and no bounding boxes are supplied, an error is raised. + /// + public static (Tensor begin, Tensor size, Tensor bboxes) sample_distorted_bounding_box (Tensor image_size, Tensor bounding_boxes, int? seed = null, int? seed2 = null, float? min_object_covered = null, float[] aspect_ratio_range = null, float[] area_range = null, int? max_attempts = null, bool? use_image_if_no_bounding_boxes = null, string name = "SampleDistortedBoundingBox") + { + var dict = new Dictionary(); + dict["image_size"] = image_size; + dict["bounding_boxes"] = bounding_boxes; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (min_object_covered.HasValue) + dict["min_object_covered"] = min_object_covered.Value; + if (aspect_ratio_range != null) + dict["aspect_ratio_range"] = aspect_ratio_range; + if (area_range != null) + dict["area_range"] = area_range; + if (max_attempts.HasValue) + dict["max_attempts"] = max_attempts.Value; + if (use_image_if_no_bounding_boxes.HasValue) + dict["use_image_if_no_bounding_boxes"] = use_image_if_no_bounding_boxes.Value; + var op = _op_def_lib._apply_op_helper("SampleDistortedBoundingBox", name: name, keywords: dict); + int _idx = 0; + var begin = op.outputs[_idx++]; + var size = op.outputs[_idx++]; + var bboxes = op.outputs[_idx++]; + return (begin, size, bboxes); + } +
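The Python example above translates roughly as follows (editorial sketch: gen_ops.shape and gen_ops.slice are assumed to be the generated bindings for the Shape and Slice ops, `image` and `boundingBoxes` are assumed inputs, and the summary/drawing steps are omitted):

    // Generate a single distorted bounding box, then crop to it.
    var (begin, size, bboxForDraw) = gen_ops.sample_distorted_bounding_box(
        gen_ops.shape(image), boundingBoxes, min_object_covered: 0.1f);
    var distortedImage = gen_ops.slice(image, begin, size);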
+ /// + /// Generate a single randomly distorted bounding box for an image. + /// + /// + /// 1-D, containing [height, width, channels]. + /// + /// + /// 3-D with shape [batch, N, 4] describing the N bounding boxes + /// associated with the image. + /// + /// + /// The cropped area of the image must contain at least this + /// fraction of any bounding box supplied. The value of this parameter should be + /// non-negative. In the case of 0, the cropped area does not need to overlap + /// any of the bounding boxes supplied. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SampleDistortedBoundingBoxV2'. + /// + /// + /// If either seed or seed2 are set to non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a random + /// seed. + /// + /// + /// A second seed to avoid seed collision.
+ /// + /// + /// The cropped area of the image must have an aspect ratio = + /// width / height within this range. + /// + /// + /// The cropped area of the image must contain a fraction of the + /// supplied image within this range. + /// + /// + /// Number of attempts at generating a cropped region of the image + /// of the specified constraints. After max_attempts failures, return the entire + /// image. + /// + /// + /// Controls behavior if no bounding boxes supplied. + /// If true, assume an implicit bounding box covering the whole input. If false, + /// raise an error. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// begin : 1-D, containing [offset_height, offset_width, 0]. Provide as input to + /// tf.slice. + /// size : 1-D, containing [target_height, target_width, -1]. Provide as input to + /// tf.slice. + /// bboxes : 3-D with shape [1, 1, 4] containing the distorted bounding box. + /// Provide as input to tf.image.draw_bounding_boxes. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Bounding box annotations are often supplied in addition to ground-truth labels + /// in image recognition or object localization tasks. A common technique for + /// training such a system is to randomly distort an image while preserving + /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted + /// localization of an object, i.e. bounding box, given an image_size, + /// bounding_boxes and a series of constraints. + /// + /// The output of this Op is a single bounding box that may be used to crop the + /// original image. The output is returned as 3 tensors: begin, size and + /// bboxes. The first 2 tensors can be fed directly into tf.slice to crop the + /// image. The latter may be supplied to tf.image.draw_bounding_boxes to visualize + /// what the bounding box looks like. + /// + /// Bounding boxes are supplied and returned as [y_min, x_min, y_max, x_max]. The + /// bounding box coordinates are floats in [0.0, 1.0] relative to the width and + /// height of the underlying image. + /// + /// For example, + /// + /// + /// # Generate a single distorted bounding box. + /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + /// tf.shape(image), + /// bounding_boxes=bounding_boxes) + /// + /// # Draw the bounding box in an image summary. + /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + /// bbox_for_draw) + /// tf.summary.image('images_with_box', image_with_box) + /// + /// # Employ the bounding box to distort the image. + /// distorted_image = tf.slice(image, begin, size) + /// + /// + /// Note that if no bounding box information is available, setting + /// use_image_if_no_bounding_boxes = true will assume there is a single implicit + /// bounding box covering the whole image. If use_image_if_no_bounding_boxes is + /// false and no bounding boxes are supplied, an error is raised. + /// + public static (Tensor begin, Tensor size, Tensor bboxes) sample_distorted_bounding_box_v2 (Tensor image_size, Tensor bounding_boxes, Tensor min_object_covered, int? seed = null, int? seed2 = null, float[] aspect_ratio_range = null, float[] area_range = null, int? max_attempts = null, bool? use_image_if_no_bounding_boxes = null, string name = "SampleDistortedBoundingBoxV2") + { + var dict = new Dictionary(); + dict["image_size"] = image_size; + dict["bounding_boxes"] = bounding_boxes; + dict["min_object_covered"] = min_object_covered; + if (seed.HasValue) + dict["seed"] = seed.Value; + if (seed2.HasValue) + dict["seed2"] = seed2.Value; + if (aspect_ratio_range != null) + dict["aspect_ratio_range"] = aspect_ratio_range; + if (area_range != null) + dict["area_range"] = area_range; + if (max_attempts.HasValue) + dict["max_attempts"] = max_attempts.Value; + if (use_image_if_no_bounding_boxes.HasValue) + dict["use_image_if_no_bounding_boxes"] = use_image_if_no_bounding_boxes.Value; + var op = _op_def_lib._apply_op_helper("SampleDistortedBoundingBoxV2", name: name, keywords: dict); + int _idx = 0; + var begin = op.outputs[_idx++]; + var size = op.outputs[_idx++]; + var bboxes = op.outputs[_idx++]; + return (begin, size, bboxes); + } + + /// + /// Saves the input tensors to disk. + /// + /// Must have a single element. The name of the file to which we write + /// the tensor. + /// + /// + /// Shape [N]. The names of the tensors to be saved. + /// + /// + /// N tensors to save. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Save'. + /// + /// + /// Returns the description of the operation + /// + /// + /// The size of tensor_names must match the number of tensors in data. data[i] + /// is written to filename with name tensor_names[i]. + /// + /// See also SaveSlices. + /// + public static Operation save (Tensor filename, Tensor tensor_names, Tensor[] data, string name = "Save") + { + var dict = new Dictionary(); + dict["filename"] = filename; + dict["tensor_names"] = tensor_names; + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("Save", name: name, keywords: dict); + return op; + } +
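For orientation, a minimal save/restore pairing through these bindings (editorial sketch: the checkpoint path is a placeholder, `w` is assumed to be a float tensor, and the resulting ops would still need to be run in a session):

    var filename = tf.constant("/tmp/model.ckpt");       // placeholder path
    var names = tf.constant(new[] { "w" });
    var saveOp = gen_ops.save(filename, names, new[] { w });
    // Later: read the tensor back; dt must match the saved dtype.
    var restored = gen_ops.restore(filename, tf.constant("w"), TF_DataType.TF_FLOAT);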
+ /// + /// Saves input tensor slices to disk. + /// + /// Must have a single element. The name of the file to which we write the + /// tensor. + /// + /// + /// Shape [N]. The names of the tensors to be saved. + /// + /// + /// Shape [N]. The shapes and slice specifications to use when + /// saving the tensors. + /// + /// + /// N tensors to save. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SaveSlices'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This is like Save except that tensors can be listed in the saved file as being + /// a slice of a larger tensor. shapes_and_slices specifies the shape of the + /// larger tensor and the slice that this tensor covers. shapes_and_slices must + /// have as many elements as tensor_names. + /// + /// Elements of the shapes_and_slices input must either be: + /// + /// * The empty string, in which case the corresponding tensor is + /// saved normally. + /// * A string of the form dim0 dim1 ... dimN-1 slice-spec where the + /// dimI are the dimensions of the larger tensor and slice-spec + /// specifies what part is covered by the tensor to save. + /// + /// slice-spec itself is a :-separated list: slice0:slice1:...:sliceN-1 + /// where each sliceI is either: + /// + /// * The string - meaning that the slice covers all indices of this dimension + /// * start,length where start and length are integers. In that + /// case the slice covers length indices starting at start. + /// + /// See also Save.
+ /// + public static Operation save_slices (Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor[] data, string name = "SaveSlices") + { + var dict = new Dictionary(); + dict["filename"] = filename; + dict["tensor_names"] = tensor_names; + dict["shapes_and_slices"] = shapes_and_slices; + dict["data"] = data; + var op = _op_def_lib._apply_op_helper("SaveSlices", name: name, keywords: dict); + return op; + } + + /// + /// Saves tensors in V2 checkpoint format. + /// + /// + /// Must have a single element. The prefix of the V2 checkpoint to which we + /// write the tensors. + /// + /// + /// shape {N}. The names of the tensors to be saved. + /// + /// + /// shape {N}. The slice specs of the tensors to be saved. + /// Empty strings indicate that they are non-partitioned tensors. + /// + /// + /// N tensors to save. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SaveV2'. + /// + /// + /// Returns the description of the operation + /// + /// + /// By default, saves the named tensors in full. If the caller wishes to save + /// specific slices of full tensors, "shape_and_slices" should be non-empty strings + /// and correspondingly well-formed. + /// + public static Operation save_v2 (Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor[] tensors, string name = "SaveV2") + { + var dict = new Dictionary(); + dict["prefix"] = prefix; + dict["tensor_names"] = tensor_names; + dict["shape_and_slices"] = shape_and_slices; + dict["tensors"] = tensors; + var op = _op_def_lib._apply_op_helper("SaveV2", name: name, keywords: dict); + return op; + } + + /// + /// Outputs a Summary protocol buffer with scalar values. + /// + /// + /// Tags for the summary. + /// + /// + /// Same shape as tags. Values for the summary. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScalarSummary'. + /// + /// + /// Scalar. Serialized Summary protocol buffer. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The input tags and values must have the same shape. The generated summary + /// has a summary value for each tag-value pair in tags and values. + /// + public static Tensor scalar_summary (Tensor tags, Tensor values, string name = "ScalarSummary") + { + var dict = new Dictionary(); + dict["tags"] = tags; + dict["values"] = values; + var op = _op_def_lib._apply_op_helper("ScalarSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Adds sparse updates to a variable reference. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to add to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterAdd'. + /// + /// + /// If True, the addition will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as ref. Returned as a convenience for operations that want + /// to use the updated values after the update is done. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] += updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] 
+= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + /// + /// This operation outputs ref after the update is done. + /// This makes it easier to chain operations that need to use the reset value. + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions add. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt> + /// </div> + /// + public static Tensor scatter_add (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterAdd") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ScatterAdd", name: name, keywords: dict); + return op.output; + } +
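A minimal sketch of the vector-index case (editorial illustration: `v` is assumed to be the ref-typed output of a 2-column Variable node, and tf.constant is assumed as a constant helper):

    var indices = tf.constant(new[] { 0, 2 });
    var updates = tf.constant(new float[,] { { 1f, 1f }, { 2f, 2f } });
    // Rows 0 and 2 of v each receive the matching row of updates;
    // duplicate indices would have their contributions added.
    var updated = gen_ops.scatter_add(v, indices, updates, use_locking: true);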
+ /// + /// Divides a variable reference by sparse updates. + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of values that ref is divided by. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterDiv'. + /// + /// + /// If True, the operation will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as ref. Returned as a convenience for operations that want + /// to use the updated values after the update is done. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes + /// + /// + /// # Scalar indices + /// ref[indices, ...] /= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] /= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + /// + /// + /// This operation outputs ref after the update is done. + /// This makes it easier to chain operations that need to use the reset value. + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions divide. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + public static Tensor scatter_div (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterDiv") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ScatterDiv", name: name, keywords: dict); + return op.output; + } + + /// + /// Reduces sparse updates into a variable reference using the max operation. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to reduce into ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMax'. + /// + /// + /// If True, the update will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as ref. Returned as a convenience for operations that want + /// to use the updated values after the update is done. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = max(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// This operation outputs ref after the update is done. + /// This makes it easier to chain operations that need to use the reset value. + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions combine. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt> + /// </div> + /// + public static Tensor scatter_max (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterMax") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ScatterMax", name: name, keywords: dict); + return op.output; + } +
+ /// + /// Reduces sparse updates into a variable reference using the min operation. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to reduce into ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMin'. + /// + /// + /// If True, the update will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as ref. Returned as a convenience for operations that want + /// to use the updated values after the update is done. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = min(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// This operation outputs ref after the update is done. + /// This makes it easier to chain operations that need to use the reset value. + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions combine. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt> + /// </div> + /// + public static Tensor scatter_min (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterMin") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ScatterMin", name: name, keywords: dict); + return op.output; + } + + /// + /// Multiplies sparse updates into a variable reference. + /// + /// + /// Should be from a Variable node. + /// + /// + /// A tensor of indices into the first dimension of ref. + /// + /// + /// A tensor of updated values to multiply to ref. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMul'. + /// + /// + /// If True, the operation will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// = Same as ref. Returned as a convenience for operations that want + /// to use the updated values after the update is done. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation computes + /// + /// + /// # Scalar indices + /// ref[indices, ...] *= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] *= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + /// + /// + /// This operation outputs ref after the update is done. + /// This makes it easier to chain operations that need to use the reset value. + /// + /// Duplicate entries are handled correctly: if multiple indices reference + /// the same location, their contributions multiply. + /// + /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = []. + /// + public static Tensor scatter_mul (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterMul") + { + var dict = new Dictionary(); + dict["ref"] = reference; + dict["indices"] = indices; + dict["updates"] = updates; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("ScatterMul", name: name, keywords: dict); + return op.output; + } +
+ ///
+ /// Scatter updates into a new tensor according to indices.
+ ///
+ ///
+ /// Index tensor.
+ ///
+ ///
+ /// Updates to scatter into output.
+ ///
+ ///
+ /// 1-D. The shape of the resulting tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNd'.
+ ///
+ ///
+ /// A new tensor with the given shape and updates applied according
+ /// to the indices.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Creates a new tensor by applying sparse updates to individual values or
+ /// slices within a tensor (initially zero for numeric, empty for string) of
+ /// the given shape according to indices. This operator is the inverse of the
+ /// tf.gather_nd operator which extracts values or slices from a given tensor.
+ ///
+ /// If indices contains duplicates, then their updates are accumulated (summed).
+ ///
+ /// **WARNING**: The order in which updates are applied is nondeterministic, so the
+ /// output will be nondeterministic if indices contains duplicates -- because
+ /// of some numerical approximation issues, numbers summed in different order
+ /// may yield different results.
+ ///
+ /// indices is an integer tensor containing indices into a new tensor of shape
+ /// shape. The last dimension of indices can be at most the rank of shape:
+ ///
+ /// indices.shape[-1] <= shape.rank
+ ///
+ /// The last dimension of indices corresponds to indices into elements
+ /// (if indices.shape[-1] = shape.rank) or slices
+ /// (if indices.shape[-1] < shape.rank) along dimension indices.shape[-1] of
+ /// shape. updates is a tensor with shape
+ ///
+ /// indices.shape[:-1] + shape[indices.shape[-1]:]
+ ///
+ /// The simplest form of scatter is to insert individual elements in a tensor by
+ /// index. For example, say we want to insert 4 scattered elements in a rank-1
+ /// tensor with 8 elements.
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
+ /// </div>
+ ///
+ /// In Python, this scatter operation would look like this:
+ ///
+ ///
+ /// indices = tf.constant([[4], [3], [1], [7]])
+ /// updates = tf.constant([9, 10, 11, 12])
+ /// shape = tf.constant([8])
+ /// scatter = tf.scatter_nd(indices, updates, shape)
+ /// with tf.Session() as sess:
+ /// print(sess.run(scatter))
+ ///
+ ///
+ /// The resulting tensor would look like this:
+ ///
+ /// [0, 11, 0, 10, 9, 0, 0, 12]
+ ///
+ /// We can also insert entire slices of a higher rank tensor all at once. For
+ /// example, if we wanted to insert two slices in the first dimension of a
+ /// rank-3 tensor with two matrices of new values.
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
+ /// </div>
+ ///
+ /// In Python, this scatter operation would look like this:
+ ///
+ ///
+ /// indices = tf.constant([[0], [2]])
+ /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+ /// [7, 7, 7, 7], [8, 8, 8, 8]],
+ /// [[5, 5, 5, 5], [6, 6, 6, 6],
+ /// [7, 7, 7, 7], [8, 8, 8, 8]]])
+ /// shape = tf.constant([4, 4, 4])
+ /// scatter = tf.scatter_nd(indices, updates, shape)
+ /// with tf.Session() as sess:
+ /// print(sess.run(scatter))
+ ///
+ ///
+ /// The resulting tensor would look like this:
+ ///
+ /// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+ /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+ ///
+ /// Note that on CPU, if an out of bound index is found, an error is returned.
+ /// On GPU, if an out of bound index is found, the index is ignored.
+ ///
+ public static Tensor scatter_nd (Tensor indices, Tensor updates, Tensor shape, string name = "ScatterNd")
+ {
+ var dict = new Dictionary();
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ dict["shape"] = shape;
+ var op = _op_def_lib._apply_op_helper("ScatterNd", name: name, keywords: dict);
+ return op.output;
+ }
+
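+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // C# counterpart of the first Python example above: with indices
+ // [[4], [3], [1], [7]], updates [9, 10, 11, 12] and shape [8] (all assumed
+ // to be constant tensors built by the caller), the result evaluates to
+ // [0, 11, 0, 10, 9, 0, 0, 12]. The method name is hypothetical.
+ public static Tensor scatter_into_zeros (Tensor indices, Tensor updates, Tensor shape)
+ {
+ // Inverse of gather_nd: writes updates into an initially-zero tensor.
+ return scatter_nd(indices, updates, shape);
+ }
+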
+ ///
+ /// Applies sparse addition between updates and individual values or slices
+ ///
+ ///
+ /// A mutable Tensor. Should be from a Variable node.
+ ///
+ ///
+ /// A Tensor. Must be one of the following types: int32, int64.
+ /// A tensor of indices into ref.
+ ///
+ ///
+ /// A Tensor. Must have the same type as ref. A tensor of updated values
+ /// to add to ref.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdAdd'.
+ ///
+ ///
+ /// An optional bool. Defaults to True. If True, the assignment will
+ /// be protected by a lock; otherwise the behavior is undefined,
+ /// but may exhibit less contention.
+ ///
+ ///
+ /// Same as ref. Returned as a convenience for operations that want
+ /// to use the updated values after the update is done.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// within a given variable according to indices.
+ ///
+ /// ref is a Tensor with rank P and indices is a Tensor of rank Q.
+ ///
+ /// indices must be an integer tensor, containing indices into ref.
+ /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where 0 < K <= P.
+ ///
+ /// The innermost dimension of indices (with length K) corresponds to
+ /// indices into elements (if K = P) or slices (if K < P) along the Kth
+ /// dimension of ref.
+ ///
+ /// updates is a Tensor of rank Q-1+P-K with shape:
+ ///
+ /// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
+ ///
+ /// For example, say we want to add 4 scattered elements to a rank-1 tensor
+ /// with 8 elements. In Python, that addition would look like this:
+ ///
+ /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ /// indices = tf.constant([[4], [3], [1], [7]])
+ /// updates = tf.constant([9, 10, 11, 12])
+ /// add = tf.scatter_nd_add(ref, indices, updates)
+ /// with tf.Session() as sess:
+ /// print(sess.run(add))
+ ///
+ /// The resulting update to ref would look like this:
+ ///
+ /// [1, 13, 3, 14, 14, 6, 7, 20]
+ ///
+ /// See tf.scatter_nd for more details about how to make updates to
+ /// slices.
+ ///
+ public static Tensor scatter_nd_add (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterNdAdd")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ScatterNdAdd", name: name, keywords: dict);
+ return op.output;
+ }
+
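+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // Mirrors the Python example above: adding updates [9, 10, 11, 12] at
+ // indices [[4], [3], [1], [7]] to ref [1..8] yields [1, 13, 3, 14, 14, 6, 7, 20].
+ // The method name and input tensors are hypothetical caller-built values.
+ public static Tensor accumulate_sparse_rows (Tensor variable_ref, Tensor indices, Tensor updates)
+ {
+ // use_locking: false trades determinism under contention for speed.
+ return scatter_nd_add(variable_ref, indices, updates, use_locking: false);
+ }
+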
+ ///
+ /// Applies sparse addition to input using individual values or slices
+ ///
+ ///
+ /// A Tensor.
+ ///
+ ///
+ /// A Tensor. Must be one of the following types: int32, int64.
+ /// A tensor of indices into input.
+ ///
+ ///
+ /// A Tensor. Must have the same type as ref. A tensor of updated values
+ /// to add to input.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdNonAliasingAdd'.
+ ///
+ ///
+ /// A Tensor with the same shape as input, containing values of input
+ /// updated with updates.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// from updates according to indices. The updates are non-aliasing:
+ /// input is only modified in-place if no other operations will use it.
+ /// Otherwise, a copy of input is made. This operation has a gradient with
+ /// respect to both input and updates.
+ ///
+ /// input is a Tensor with rank P and indices is a Tensor of rank Q.
+ ///
+ /// indices must be an integer tensor, containing indices into input.
+ /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where 0 < K <= P.
+ ///
+ /// The innermost dimension of indices (with length K) corresponds to
+ /// indices into elements (if K = P) or (P-K)-dimensional slices
+ /// (if K < P) along the Kth dimension of input.
+ ///
+ /// updates is a Tensor of rank Q-1+P-K with shape:
+ ///
+ /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
+ ///
+ /// For example, say we want to add 4 scattered elements to a rank-1 tensor
+ /// with 8 elements. In Python, that addition would look like this:
+ ///
+ /// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
+ /// indices = tf.constant([[4], [3], [1], [7]])
+ /// updates = tf.constant([9, 10, 11, 12])
+ /// output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
+ /// with tf.Session() as sess:
+ /// print(sess.run(output))
+ ///
+ /// The resulting value output would look like this:
+ ///
+ /// [1, 13, 3, 14, 14, 6, 7, 20]
+ ///
+ /// See tf.scatter_nd for more details about how to make updates to slices.
+ ///
+ public static Tensor scatter_nd_non_aliasing_add (Tensor input, Tensor indices, Tensor updates, string name = "ScatterNdNonAliasingAdd")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ var op = _op_def_lib._apply_op_helper("ScatterNdNonAliasingAdd", name: name, keywords: dict);
+ return op.output;
+ }
+
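+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // Unlike scatter_nd_add, the non-aliasing variant works on a value tensor
+ // rather than a mutable ref and is differentiable with respect to both
+ // input and updates, which makes it the natural choice inside a training
+ // graph. Method name and inputs are hypothetical caller-built values.
+ public static Tensor add_sparse_correction (Tensor input, Tensor indices, Tensor updates)
+ {
+ // Returns a tensor shaped like input; input itself is only reused
+ // in place when no other op consumes it.
+ return scatter_nd_non_aliasing_add(input, indices, updates);
+ }
+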
+ ///
+ /// Applies sparse subtraction between updates and individual values or slices
+ ///
+ ///
+ /// A mutable Tensor. Should be from a Variable node.
+ ///
+ ///
+ /// A Tensor. Must be one of the following types: int32, int64.
+ /// A tensor of indices into ref.
+ ///
+ ///
+ /// A Tensor. Must have the same type as ref. A tensor of updated values
+ /// to subtract from ref.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdSub'.
+ ///
+ ///
+ /// An optional bool. Defaults to True. If True, the assignment will
+ /// be protected by a lock; otherwise the behavior is undefined,
+ /// but may exhibit less contention.
+ ///
+ ///
+ /// Same as ref. Returned as a convenience for operations that want
+ /// to use the updated values after the update is done.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// within a given variable according to indices.
+ ///
+ /// ref is a Tensor with rank P and indices is a Tensor of rank Q.
+ ///
+ /// indices must be an integer tensor, containing indices into ref.
+ /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where 0 < K <= P.
+ ///
+ /// The innermost dimension of indices (with length K) corresponds to
+ /// indices into elements (if K = P) or slices (if K < P) along the Kth
+ /// dimension of ref.
+ ///
+ /// updates is a Tensor of rank Q-1+P-K with shape:
+ ///
+ /// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
+ ///
+ /// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
+ /// with 8 elements. In Python, that subtraction would look like this:
+ ///
+ /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ /// indices = tf.constant([[4], [3], [1], [7]])
+ /// updates = tf.constant([9, 10, 11, 12])
+ /// sub = tf.scatter_nd_sub(ref, indices, updates)
+ /// with tf.Session() as sess:
+ /// print(sess.run(sub))
+ ///
+ /// The resulting update to ref would look like this:
+ ///
+ /// [1, -9, 3, -6, -4, 6, 7, -4]
+ ///
+ /// See tf.scatter_nd for more details about how to make updates to
+ /// slices.
+ ///
+ public static Tensor scatter_nd_sub (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterNdSub")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ScatterNdSub", name: name, keywords: dict);
+ return op.output;
+ }
+
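+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // Mirrors the Python example above: subtracting updates [9, 10, 11, 12] at
+ // indices [[4], [3], [1], [7]] from ref [1..8] yields [1, -9, 3, -6, -4, 6, 7, -4].
+ // Method name and input tensors are hypothetical caller-built values.
+ public static Tensor subtract_sparse_rows (Tensor variable_ref, Tensor indices, Tensor updates)
+ {
+ // Locked update: deterministic when several writers hit the same rows.
+ return scatter_nd_sub(variable_ref, indices, updates, use_locking: true);
+ }
+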
+ ///
+ /// Applies sparse updates to individual values or slices within a given
+ ///
+ ///
+ /// A mutable Tensor. Should be from a Variable node.
+ ///
+ ///
+ /// A Tensor. Must be one of the following types: int32, int64.
+ /// A tensor of indices into ref.
+ ///
+ ///
+ /// A Tensor. Must have the same type as ref. A tensor of updated
+ /// values to add to ref.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdUpdate'.
+ ///
+ ///
+ /// An optional bool. Defaults to True. If True, the assignment will
+ /// be protected by a lock; otherwise the behavior is undefined,
+ /// but may exhibit less contention.
+ ///
+ ///
+ /// Same as ref. Returned as a convenience for operations that want to
+ /// use the updated values after the update is done.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// variable according to indices.
+ ///
+ /// ref is a Tensor with rank P and indices is a Tensor of rank Q.
+ ///
+ /// indices must be an integer tensor, containing indices into ref.
+ /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where 0 < K <= P.
+ ///
+ /// The innermost dimension of indices (with length K) corresponds to
+ /// indices into elements (if K = P) or slices (if K < P) along the Kth
+ /// dimension of ref.
+ ///
+ /// updates is a Tensor of rank Q-1+P-K with shape:
+ ///
+ /// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
+ ///
+ /// For example, say we want to update 4 scattered elements in a rank-1 tensor
+ /// with 8 elements. In Python, that update would look like this:
+ ///
+ ///
+ /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ /// indices = tf.constant([[4], [3], [1], [7]])
+ /// updates = tf.constant([9, 10, 11, 12])
+ /// update = tf.scatter_nd_update(ref, indices, updates)
+ /// with tf.Session() as sess:
+ /// print(sess.run(update))
+ ///
+ ///
+ /// The resulting update to ref would look like this:
+ ///
+ /// [1, 11, 3, 10, 9, 6, 7, 12]
+ ///
+ /// See tf.scatter_nd for more details about how to make updates to
+ /// slices.
+ ///
+ /// See also tf.scatter_update and tf.batch_scatter_update.
+ ///
+ public static Tensor scatter_nd_update (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterNdUpdate")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ScatterNdUpdate", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Subtracts sparse updates from a variable reference.
+ ///
+ ///
+ /// Should be from a Variable node.
+ ///
+ ///
+ /// A tensor of indices into the first dimension of ref.
+ ///
+ ///
+ /// A tensor of updated values to subtract from ref.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterSub'.
+ ///
+ ///
+ /// If True, the subtraction will be protected by a lock;
+ /// otherwise the behavior is undefined, but may exhibit less contention.
+ ///
+ ///
+ /// = Same as ref. Returned as a convenience for operations that want
+ /// to use the updated values after the update is done.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ ///
+ /// # Scalar indices
+ /// ref[indices, ...] -= updates[...]
+ ///
+ /// # Vector indices (for each i)
+ /// ref[indices[i], ...] -= updates[i, ...]
+ ///
+ /// # High rank indices (for each i, ..., j)
+ /// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
+ ///
+ ///
+ /// This operation outputs ref after the update is done.
+ /// This makes it easier to chain operations that need to use the reset value.
+ ///
+ /// Duplicate entries are handled correctly: if multiple indices reference
+ /// the same location, their (negated) contributions add.
+ ///
+ /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = [].
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
+ /// </div>
+ ///
+ public static Tensor scatter_sub (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterSub")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ScatterSub", name: name, keywords: dict);
+ return op.output;
+ }
+
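+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // A sketch of a manual SGD step over a sparse slice of a parameter matrix:
+ // pre-scaled gradient rows are subtracted from the referenced variable.
+ // Method name and the variable_ref/indices/scaled_grads tensors are
+ // hypothetical, assumed to be built elsewhere in the caller's graph.
+ public static Tensor apply_sparse_sgd_step (Tensor variable_ref, Tensor indices, Tensor scaled_grads)
+ {
+ // Duplicate indices are safe here: their (negated) contributions add.
+ return scatter_sub(variable_ref, indices, scaled_grads, use_locking: true);
+ }
+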
+ ///
+ /// Applies sparse updates to a variable reference.
+ ///
+ ///
+ /// Should be from a Variable node.
+ ///
+ ///
+ /// A tensor of indices into the first dimension of ref.
+ ///
+ ///
+ /// A tensor of updated values to store in ref.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterUpdate'.
+ ///
+ ///
+ /// If True, the assignment will be protected by a lock;
+ /// otherwise the behavior is undefined, but may exhibit less contention.
+ ///
+ ///
+ /// = Same as ref. Returned as a convenience for operations that want
+ /// to use the updated values after the update is done.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This operation computes
+ ///
+ ///
+ /// # Scalar indices
+ /// ref[indices, ...] = updates[...]
+ ///
+ /// # Vector indices (for each i)
+ /// ref[indices[i], ...] = updates[i, ...]
+ ///
+ /// # High rank indices (for each i, ..., j)
+ /// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+ ///
+ ///
+ /// This operation outputs ref after the update is done.
+ /// This makes it easier to chain operations that need to use the reset value.
+ ///
+ /// If values in ref are to be updated more than once, because there are
+ /// duplicate entries in indices, the order in which the updates happen
+ /// for each value is undefined.
+ ///
+ /// Requires updates.shape = indices.shape + ref.shape[1:] or updates.shape = [].
+ ///
+ /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+ /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
+ /// </div>
+ ///
+ /// See also tf.batch_scatter_update and tf.scatter_nd_update.
+ ///
+ public static Tensor scatter_update (Tensor reference, Tensor indices, Tensor updates, bool? use_locking = null, string name = "ScatterUpdate")
+ {
+ var dict = new Dictionary();
+ dict["ref"] = reference;
+ dict["indices"] = indices;
+ dict["updates"] = updates;
+ if (use_locking.HasValue)
+ dict["use_locking"] = use_locking.Value;
+ var op = _op_def_lib._apply_op_helper("ScatterUpdate", name: name, keywords: dict);
+ return op.output;
+ }
+
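+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // Overwrites whole rows of an embedding-style variable, e.g. to reset the
+ // vectors of selected ids; the name: argument shows how a caller can pick
+ // the graph node name. Method name and tensors are hypothetical inputs.
+ public static Tensor overwrite_rows (Tensor variable_ref, Tensor indices, Tensor new_rows)
+ {
+ // Unlike scatter_add/scatter_sub, duplicate indices do not combine:
+ // which duplicate wins is undefined, per the remarks above.
+ return scatter_update(variable_ref, indices, new_rows, use_locking: true, name: "ResetRows");
+ }
+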
+ ///
+ /// Computes fingerprints of the input strings.
+ ///
+ ///
+ /// vector of strings to compute fingerprints on.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaFprint'.
+ ///
+ ///
+ /// an (N,2) shaped matrix where N is the number of elements in the input
+ /// vector. Each row contains the low and high parts of the fingerprint.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor sdca_fprint (Tensor input, string name = "SdcaFprint")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ var op = _op_def_lib._apply_op_helper("SdcaFprint", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
+ ///
+ ///
+ /// a list of vectors which contain example indices.
+ ///
+ ///
+ /// a list of vectors which contain feature indices.
+ ///
+ ///
+ /// a list of vectors which contain the feature values
+ /// associated with each feature group.
+ ///
+ ///
+ /// a list of matrices which contain the dense feature values.
+ ///
+ ///
+ /// a vector which contains the weight associated with each
+ /// example.
+ ///
+ ///
+ /// a vector which contains the label/target associated with each
+ /// example.
+ ///
+ ///
+ /// a list of vectors where each value is the indices which has
+ /// corresponding weights in sparse_weights. This field may be omitted for the
+ /// dense approach.
+ ///
+ ///
+ /// a list of vectors where each value is the weight associated with
+ /// a sparse feature group.
+ ///
+ ///
+ /// a list of vectors where the values are the weights associated
+ /// with a dense feature group.
+ ///
+ ///
+ /// a list of vectors containing the example state data.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaOptimizer'.
+ ///
+ ///
+ /// Optional argument
+ /// Type of the primal loss. Currently SdcaSolver supports logistic,
+ /// squared and hinge losses.
+ ///
+ ///
+ /// Optional argument
+ /// Symmetric l1 regularization strength.
+ ///
+ ///
+ /// Optional argument
+ /// Symmetric l2 regularization strength.
+ ///
+ ///
+ /// Optional argument
+ /// Number of partitions of the global loss function.
+ ///
+ ///
+ /// Optional argument
+ /// Number of iterations per mini-batch.
+ ///
+ ///
+ /// Whether to use Adaptive SDCA for the inner loop.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// out_example_state_data : a list of vectors containing the updated example state
+ /// data.
+ /// out_delta_sparse_weights : a list of vectors where each value is the delta
+ /// weights associated with a sparse feature group.
+ /// out_delta_dense_weights : a list of vectors where the values are the delta
+ /// weights associated with a dense feature group.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple, by fetching the Operation property.
+ ///
+ ///
+ /// linear models with L1 + L2 regularization. As the global optimization objective is
+ /// strongly-convex, the optimizer optimizes the dual objective at each step. The
+ /// optimizer applies each update one example at a time. Examples are sampled
+ /// uniformly, and the optimizer is learning rate free and enjoys linear convergence
+ /// rate.
+ ///
+ /// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
+ /// Shai Shalev-Shwartz, Tong Zhang. 2012
+ ///
+ /// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
+ ///
+ /// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
+ /// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
+ /// Peter Richtarik, Martin Takac. 2015
+ ///
+ /// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
+ /// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
+ ///
+ public static (Tensor out_example_state_data, Tensor[] out_delta_sparse_weights, Tensor[] out_delta_dense_weights) sdca_optimizer (Tensor[] sparse_example_indices, Tensor[] sparse_feature_indices, Tensor[] sparse_feature_values, Tensor[] dense_features, Tensor example_weights, Tensor example_labels, Tensor[] sparse_indices, Tensor[] sparse_weights, Tensor[] dense_weights, Tensor example_state_data, string loss_type, float l1, float l2, int num_loss_partitions, int num_inner_iterations, bool?
adaptative = null, string name = "SdcaOptimizer") + { + var dict = new Dictionary(); + dict["sparse_example_indices"] = sparse_example_indices; + dict["sparse_feature_indices"] = sparse_feature_indices; + dict["sparse_feature_values"] = sparse_feature_values; + dict["dense_features"] = dense_features; + dict["example_weights"] = example_weights; + dict["example_labels"] = example_labels; + dict["sparse_indices"] = sparse_indices; + dict["sparse_weights"] = sparse_weights; + dict["dense_weights"] = dense_weights; + dict["example_state_data"] = example_state_data; + dict["loss_type"] = loss_type; + dict["l1"] = l1; + dict["l2"] = l2; + dict["num_loss_partitions"] = num_loss_partitions; + dict["num_inner_iterations"] = num_inner_iterations; + if (adaptative.HasValue) + dict["adaptative"] = adaptative.Value; + var op = _op_def_lib._apply_op_helper("SdcaOptimizer", name: name, keywords: dict); + int _idx = 0; + var out_example_state_data = op.outputs[_idx++]; + var out_delta_sparse_weights = Enumerable.Range(0, op.OutputListLength("out_delta_sparse_weights")).Select(_ => op.outputs[_idx++]).ToArray(); + var out_delta_dense_weights = Enumerable.Range(0, op.OutputListLength("out_delta_dense_weights")).Select(_ => op.outputs[_idx++]).ToArray(); + return (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights); + } + + /// + /// Applies L1 regularization shrink step on the parameters. + /// + /// + /// a list of vectors where each value is the weight associated with a + /// feature group. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaShrinkL1'. + /// + /// + /// Optional argument + /// Symmetric l1 regularization strength. + /// + /// + /// Optional argument + /// Symmetric l2 regularization strength. Should be a positive float. + /// + /// + /// Returns the description of the operation + /// + public static Operation sdca_shrink_l1 (Tensor[] weights, float l1, float l2, string name = "SdcaShrinkL1") + { + var dict = new Dictionary(); + dict["weights"] = weights; + dict["l1"] = l1; + dict["l2"] = l2; + var op = _op_def_lib._apply_op_helper("SdcaShrinkL1", name: name, keywords: dict); + return op; + } + + /// + /// Computes the maximum along segments of a tensor. + /// + /// + /// + /// + /// A 1-D tensor whose size is equal to the size of data's + /// first dimension. Values should be sorted and can be repeated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMax'. + /// + /// + /// Has same shape as data, except for dimension 0 which + /// has size k, the number of segments. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \max_j(data_j)\\) where max is over j such + /// that segment_ids[j] == i. + /// + /// If the max is empty for a given segment ID i, output[i] = 0. 
+ /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt> + /// </div> + /// + public static Tensor segment_max (Tensor data, Tensor segment_ids, string name = "SegmentMax") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + var op = _op_def_lib._apply_op_helper("SegmentMax", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the mean along segments of a tensor. + /// + /// + /// + /// + /// A 1-D tensor whose size is equal to the size of data's + /// first dimension. Values should be sorted and can be repeated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMean'. + /// + /// + /// Has same shape as data, except for dimension 0 which + /// has size k, the number of segments. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \frac{\sum_j data_j}{N}\\) where mean is + /// over j such that segment_ids[j] == i and N is the total number of + /// values summed. + /// + /// If the mean is empty for a given segment ID i, output[i] = 0. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt> + /// </div> + /// + public static Tensor segment_mean (Tensor data, Tensor segment_ids, string name = "SegmentMean") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + var op = _op_def_lib._apply_op_helper("SegmentMean", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the minimum along segments of a tensor. + /// + /// + /// + /// + /// A 1-D tensor whose size is equal to the size of data's + /// first dimension. Values should be sorted and can be repeated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMin'. + /// + /// + /// Has same shape as data, except for dimension 0 which + /// has size k, the number of segments. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \min_j(data_j)\\) where min is over j such + /// that segment_ids[j] == i. + /// + /// If the min is empty for a given segment ID i, output[i] = 0. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt> + /// </div> + /// + public static Tensor segment_min (Tensor data, Tensor segment_ids, string name = "SegmentMin") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + var op = _op_def_lib._apply_op_helper("SegmentMin", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the product along segments of a tensor. 
+ /// + /// + /// + /// + /// A 1-D tensor whose size is equal to the size of data's + /// first dimension. Values should be sorted and can be repeated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentProd'. + /// + /// + /// Has same shape as data, except for dimension 0 which + /// has size k, the number of segments. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \prod_j data_j\\) where the product is over j such + /// that segment_ids[j] == i. + /// + /// If the product is empty for a given segment ID i, output[i] = 1. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt> + /// </div> + /// + public static Tensor segment_prod (Tensor data, Tensor segment_ids, string name = "SegmentProd") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + var op = _op_def_lib._apply_op_helper("SegmentProd", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// + /// A 1-D tensor whose size is equal to the size of data's + /// first dimension. Values should be sorted and can be repeated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentSum'. + /// + /// + /// Has same shape as data, except for dimension 0 which + /// has size k, the number of segments. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \sum_j data_j\\) where sum is over j such + /// that segment_ids[j] == i. + /// + /// If the sum is empty for a given segment ID i, output[i] = 0. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt> + /// </div> + /// + public static Tensor segment_sum (Tensor data, Tensor segment_ids, string name = "SegmentSum") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + var op = _op_def_lib._apply_op_helper("SegmentSum", name: name, keywords: dict); + return op.output; + } + + /// + /// Selects elements from x or y, depending on condition. + /// + /// + /// + /// + /// = A Tensor which may have the same shape as condition. + /// If condition is rank 1, x may have higher rank, + /// but its first dimension must match the size of condition. + /// + /// + /// = A Tensor with the same type and shape as x. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Select'. + /// + /// + /// = A Tensor with the same type and shape as x and y. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ ///
+ ///
+ /// The t and e tensors must all have the same shape, and the
+ /// output will also have that shape.
+ ///
+ /// The condition tensor must be a scalar if t and e are scalars.
+ /// If t and e are vectors or higher rank, then condition must be either a
+ /// scalar, a vector with size matching the first dimension of t, or must have
+ /// the same shape as t.
+ ///
+ /// The condition tensor acts as a mask that chooses, based on the value at each
+ /// element, whether the corresponding element / row in the output should be
+ /// taken from t (if true) or e (if false).
+ ///
+ /// If condition is a vector and t and e are higher rank matrices, then
+ /// it chooses which row (outer dimension) to copy from t and e.
+ /// If condition has the same shape as t and e, then it chooses which
+ /// element to copy from t and e.
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 'condition' tensor is [[True, False]
+ /// # [False, True]]
+ /// # 't' is [[1, 2],
+ /// # [3, 4]]
+ /// # 'e' is [[5, 6],
+ /// # [7, 8]]
+ /// select(condition, t, e) # => [[1, 6], [7, 4]]
+ ///
+ ///
+ /// # 'condition' tensor is [True, False]
+ /// # 't' is [[1, 2],
+ /// # [3, 4]]
+ /// # 'e' is [[5, 6],
+ /// # [7, 8]]
+ /// select(condition, t, e) ==> [[1, 2],
+ /// [7, 8]]
+ ///
+ ///
+ ///
+ public static Tensor select (Tensor condition, Tensor t, Tensor e, string name = "Select")
+ {
+ var dict = new Dictionary();
+ dict["condition"] = condition;
+ dict["t"] = t;
+ dict["e"] = e;
+ var op = _op_def_lib._apply_op_helper("Select", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
+ ///
+ ///
+ /// Shape is [..., M, M].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SelfAdjointEig'.
+ ///
+ ///
+ /// Shape is [..., M+1, M].
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions
+ /// form square matrices, with the same constraints as the single matrix
+ /// SelfAdjointEig.
+ ///
+ /// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
+ /// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
+ /// are sorted in non-decreasing order.
+ ///
+ public static Tensor self_adjoint_eig (Tensor input, string name = "SelfAdjointEig")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ var op = _op_def_lib._apply_op_helper("SelfAdjointEig", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the eigen decomposition of one or more square self-adjoint matrices.
+ ///
+ ///
+ /// Tensor input of shape [N, N].
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SelfAdjointEigV2'.
+ ///
+ ///
+ /// If True then eigenvectors will be computed and returned in v.
+ /// Otherwise, only the eigenvalues will be computed.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// e : Eigenvalues. Shape is [N].
+ /// v : Eigenvectors. Shape is [N, N].
+ /// The Operation can be fetched from any of the Tensors returned in the tuple, by fetching the Operation property.
+ ///
+ ///
+ /// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
+ /// input such that input[..., :, :] = v[..., :, :] * diag(e[..., :]).
The eigenvalues + /// are sorted in non-decreasing order. + /// + /// + /// # a is a tensor. + /// # e is a tensor of eigenvalues. + /// # v is a tensor of eigenvectors. + /// e, v = self_adjoint_eig(a) + /// e = self_adjoint_eig(a, compute_v=False) + /// + /// + public static (Tensor e, Tensor v) self_adjoint_eig_v2 (Tensor input, bool? compute_v = null, string name = "SelfAdjointEigV2") + { + var dict = new Dictionary(); + dict["input"] = input; + if (compute_v.HasValue) + dict["compute_v"] = compute_v.Value; + var op = _op_def_lib._apply_op_helper("SelfAdjointEigV2", name: name, keywords: dict); + int _idx = 0; + var e = op.outputs[_idx++]; + var v = op.outputs[_idx++]; + return (e, v); + } + + /// + /// Computes scaled exponential linear: scale * alpha * (exp(features) - 1) + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Selu'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// if < 0, scale * features otherwise. + /// + /// To be used together with + /// initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN'). + /// For correct dropout, use tf.contrib.nn.alpha_dropout. + /// + /// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + /// + public static Tensor selu (Tensor features, string name = "Selu") + { + var dict = new Dictionary(); + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Selu", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes gradients for the scaled exponential linear (Selu) operation. + /// + /// + /// The backpropagated gradients to the corresponding Selu operation. + /// + /// + /// The outputs of the corresponding Selu operation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SeluGrad'. + /// + /// + /// The gradients: gradients * (outputs + scale * alpha) + /// if outputs < 0, scale * gradients otherwise. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor selu_grad (Tensor gradients, Tensor outputs, string name = "SeluGrad") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["outputs"] = outputs; + var op = _op_def_lib._apply_op_helper("SeluGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts the given resource_handle representing an iterator to a variant tensor. + /// + /// + /// A handle to an iterator resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeIterator'. + /// + /// + /// A variant tensor storing the state of the iterator contained in the + /// resource. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor serialize_iterator (Tensor resource_handle, string name = "SerializeIterator") + { + var dict = new Dictionary(); + dict["resource_handle"] = resource_handle; + var op = _op_def_lib._apply_op_helper("SerializeIterator", name: name, keywords: dict); + return op.output; + } + + /// + /// Serialize an N-minibatch SparseTensor into an [N, 3] Tensor object. + /// + /// + /// 2-D. The indices of the minibatch SparseTensor. + /// + /// + /// 1-D. The values of the minibatch SparseTensor. + /// + /// + /// 1-D. 
The shape of the minibatch SparseTensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeManySparse'. + /// + /// + /// The dtype to use for serialization; the supported types are string + /// (default) and variant. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The SparseTensor must have rank R greater than 1, and the first dimension + /// is treated as the minibatch dimension. Elements of the SparseTensor + /// must be sorted in increasing order of this first dimension. The serialized + /// SparseTensor objects going into each row of serialized_sparse will have + /// rank R-1. + /// + /// The minibatch size N is extracted from sparse_shape[0]. + /// + public static Tensor serialize_many_sparse (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape, TF_DataType? out_type = null, string name = "SerializeManySparse") + { + var dict = new Dictionary(); + dict["sparse_indices"] = sparse_indices; + dict["sparse_values"] = sparse_values; + dict["sparse_shape"] = sparse_shape; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("SerializeManySparse", name: name, keywords: dict); + return op.output; + } + + /// + /// Serialize a SparseTensor into a [3] Tensor object. + /// + /// + /// 2-D. The indices of the SparseTensor. + /// + /// + /// 1-D. The values of the SparseTensor. + /// + /// + /// 1-D. The shape of the SparseTensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeSparse'. + /// + /// + /// The dtype to use for serialization; the supported types are string + /// (default) and variant. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor serialize_sparse (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape, TF_DataType? out_type = null, string name = "SerializeSparse") + { + var dict = new Dictionary(); + dict["sparse_indices"] = sparse_indices; + dict["sparse_values"] = sparse_values; + dict["sparse_shape"] = sparse_shape; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("SerializeSparse", name: name, keywords: dict); + return op.output; + } + + /// + /// Transforms a Tensor into a serialized TensorProto proto. + /// + /// + /// A Tensor of type T. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeTensor'. + /// + /// + /// A serialized TensorProto proto of the input tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor serialize_tensor (Tensor tensor, string name = "SerializeTensor") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + var op = _op_def_lib._apply_op_helper("SerializeTensor", name: name, keywords: dict); + return op.output; + } + + /// + /// Number of unique elements along last dimension of input set. + /// + /// + /// 2D Tensor, indices of a SparseTensor. + /// + /// + /// 1D Tensor, values of a SparseTensor. + /// + /// + /// 1D Tensor, shape of a SparseTensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SetSize'. 
+ /// + /// + /// + /// + /// For set ranked n, this is a Tensor with rank n-1, and the same 1st + /// n-1 dimensions as set. Each value is the number of unique elements in + /// the corresponding [0...n-1] dimension of set. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Input set is a SparseTensor represented by set_indices, set_values, + /// and set_shape. The last dimension contains values in a set, duplicates are + /// allowed but ignored. + /// + /// If validate_indices is True, this op validates the order and range of set + /// indices. + /// + public static Tensor set_size (Tensor set_indices, Tensor set_values, Tensor set_shape, bool? validate_indices = null, string name = "SetSize") + { + var dict = new Dictionary(); + dict["set_indices"] = set_indices; + dict["set_values"] = set_values; + dict["set_shape"] = set_shape; + if (validate_indices.HasValue) + dict["validate_indices"] = validate_indices.Value; + var op = _op_def_lib._apply_op_helper("SetSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the shape of a tensor. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Shape'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns a 1-D integer tensor representing the shape of input. + /// + /// For example: + /// + /// + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// + /// + public static Tensor shape (Tensor input, TF_DataType? out_type = null, string name = "Shape") + { + var dict = new Dictionary(); + dict["input"] = input; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("Shape", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns shape of tensors. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShapeN'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns N 1-D integer tensors representing shape of input[i]s. + /// + public static Tensor[] shape_n (Tensor[] input, TF_DataType? out_type = null, string name = "ShapeN") + { + var dict = new Dictionary(); + dict["input"] = input; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("ShapeN", name: name, keywords: dict); + int _idx = 0; + var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray(); + return (output); + } + + /// + /// Generate a sharded filename. The filename is printf formatted as + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShardedFilename'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// %s-%05d-of-%05d, basename, shard, num_shards. 
+ /// + public static Tensor sharded_filename (Tensor basename, Tensor shard, Tensor num_shards, string name = "ShardedFilename") + { + var dict = new Dictionary(); + dict["basename"] = basename; + dict["shard"] = shard; + dict["num_shards"] = num_shards; + var op = _op_def_lib._apply_op_helper("ShardedFilename", name: name, keywords: dict); + return op.output; + } + + /// + /// Generate a glob pattern matching all sharded file names. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShardedFilespec'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor sharded_filespec (Tensor basename, Tensor num_shards, string name = "ShardedFilespec") + { + var dict = new Dictionary(); + dict["basename"] = basename; + dict["num_shards"] = num_shards; + var op = _op_def_lib._apply_op_helper("ShardedFilespec", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that shuffles and repeats elements from input_dataset + /// + /// + /// + /// + /// The number of output elements to buffer in an iterator over + /// this dataset. Compare with the min_after_dequeue attr when creating a + /// RandomShuffleQueue. + /// + /// + /// A scalar seed for the random number generator. If either seed or + /// seed2 is set to be non-zero, the random number generator is seeded + /// by the given seed. Otherwise, a random seed is used. + /// + /// + /// A second scalar seed to avoid seed collision. + /// + /// + /// A scalar representing the number of times the underlying dataset + /// should be repeated. The default is -1, which results in infinite repetition. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShuffleAndRepeatDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// pseudorandomly. + /// + public static Tensor shuffle_and_repeat_dataset (Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ShuffleAndRepeatDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["buffer_size"] = buffer_size; + dict["seed"] = seed; + dict["seed2"] = seed2; + dict["count"] = count; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("ShuffleAndRepeatDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that shuffles elements from input_dataset pseudorandomly. + /// + /// + /// + /// + /// The number of output elements to buffer in an iterator over + /// this dataset. Compare with the min_after_dequeue attr when creating a + /// RandomShuffleQueue. + /// + /// + /// A scalar seed for the random number generator. If either seed or + /// seed2 is set to be non-zero, the random number generator is seeded + /// by the given seed. Otherwise, a random seed is used. + /// + /// + /// A second scalar seed to avoid seed collision. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShuffleDataset'. 
+ /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// If true, each iterator over this dataset will be given + /// a different pseudorandomly generated seed, based on a sequence seeded by the + /// seed and seed2 inputs. If false, each iterator will be given the same + /// seed, and repeated iteration over this dataset will yield the exact same + /// sequence of results. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor shuffle_dataset (Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, TF_DataType[] output_types, TensorShape[] output_shapes, bool? reshuffle_each_iteration = null, string name = "ShuffleDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["buffer_size"] = buffer_size; + dict["seed"] = seed; + dict["seed2"] = seed2; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + if (reshuffle_each_iteration.HasValue) + dict["reshuffle_each_iteration"] = reshuffle_each_iteration.Value; + var op = _op_def_lib._apply_op_helper("ShuffleDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// An op that shuts down a running distributed TPU system. The Op returns + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShutdownDistributedTPU'. + /// + /// + /// Returns the description of the operation + /// + /// + /// an error if no system is running. + /// + public static Operation shutdown_distributed_t_p_u (string name = "ShutdownDistributedTPU") + { + var dict = new Dictionary(); + var op = _op_def_lib._apply_op_helper("ShutdownDistributedTPU", name: name, keywords: dict); + return op; + } + + /// + /// Computes sigmoid of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, y = 1 / (1 + exp(-x)). + /// + public static Tensor sigmoid (Tensor x, string name = "Sigmoid") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient of the sigmoid of x wrt its input. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and + /// dy is the corresponding input gradient. + /// + public static Tensor sigmoid_grad (Tensor y, Tensor dy, string name = "SigmoidGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns an element-wise indication of the sign of a number. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sign'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ ///
+ ///
+ /// y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0.
+ ///
+ /// For complex numbers, y = sign(x) = x / |x| if x != 0, otherwise y = 0.
+ ///
+ public static Tensor sign (Tensor x, string name = "Sign")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Sign", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes sin of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sin'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor sin (Tensor x, string name = "Sin")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Sin", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes hyperbolic sine of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sinh'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor sinh (Tensor x, string name = "Sinh")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("Sinh", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// A placeholder for input pipeline graph optimizations.
+ ///
+ ///
+ /// A variant tensor representing the input dataset.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SinkDataset'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// A placeholder for input pipeline graph optimizations.
+ ///
+ public static Tensor sink_dataset (Tensor input_dataset, string name = "SinkDataset")
+ {
+ var dict = new Dictionary();
+ dict["input_dataset"] = input_dataset;
+ var op = _op_def_lib._apply_op_helper("SinkDataset", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns the size of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Size'.
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This operation returns an integer representing the number of elements in
+ /// input.
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+ /// size(t) ==> 12
+ ///
+ ///
+ public static Tensor size (Tensor input, TF_DataType? out_type = null, string name = "Size")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ if (out_type.HasValue)
+ dict["out_type"] = out_type.Value;
+ var op = _op_def_lib._apply_op_helper("Size", name: name, keywords: dict);
+ return op.output;
+ }
+
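+ // --- Usage sketch (hand-written, not generator output) ---------------------
+ // shape (defined earlier in this file) and size both report int32 by
+ // default; passing out_type widens the result to int64 for very large
+ // tensors. The method name is hypothetical, the input tensor is an assumed
+ // caller-built value, and TF_DataType.TF_INT64 is assumed to be the int64
+ // member of the TF_DataType enum used throughout these signatures.
+ public static (Tensor shape64, Tensor size64) shape_and_size_int64 (Tensor input)
+ {
+ var s = shape(input, out_type: TF_DataType.TF_INT64);
+ var n = size(input, out_type: TF_DataType.TF_INT64);
+ return (s, n);
+ }
+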
+ /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor skip_dataset (Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SkipDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["count"] = count; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("SkipDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Parses a text file and creates a batch of examples. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Skipgram'. + /// + /// + /// Optional argument + /// The corpus's text file name. + /// + /// + /// Optional argument + /// The size of produced batch. + /// + /// + /// The number of words to predict to the left and right of the target. + /// + /// + /// The minimum number of word occurrences for it to be included in the + /// vocabulary. + /// + /// + /// Threshold for word occurrence. Words that appear with higher + /// frequency will be randomly down-sampled. Set to 0 to disable. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// vocab_word : A vector of words in the corpus. + /// vocab_freq : Frequencies of words. Sorted in the non-ascending order. + /// words_per_epoch : Number of words per epoch in the data file. + /// current_epoch : The current epoch number. + /// total_words_processed : The total number of words processed so far. + /// examples : A vector of word ids. + /// labels : A vector of word ids. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + public static (Tensor vocab_word, Tensor vocab_freq, Tensor words_per_epoch, Tensor current_epoch, Tensor total_words_processed, Tensor examples, Tensor labels) skipgram (string filename, int batch_size, int? window_size = null, int? min_count = null, float? subsample = null, string name = "Skipgram") + { + var dict = new Dictionary(); + dict["filename"] = filename; + dict["batch_size"] = batch_size; + if (window_size.HasValue) + dict["window_size"] = window_size.Value; + if (min_count.HasValue) + dict["min_count"] = min_count.Value; + if (subsample.HasValue) + dict["subsample"] = subsample.Value; + var op = _op_def_lib._apply_op_helper("Skipgram", name: name, keywords: dict); + int _idx = 0; + var vocab_word = op.outputs[_idx++]; + var vocab_freq = op.outputs[_idx++]; + var words_per_epoch = op.outputs[_idx++]; + var current_epoch = op.outputs[_idx++]; + var total_words_processed = op.outputs[_idx++]; + var examples = op.outputs[_idx++]; + var labels = op.outputs[_idx++]; + return (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels); + } + + /// + /// Return a slice from 'input'. + /// + /// + /// + /// + /// begin[i] specifies the offset into the 'i'th dimension of + /// 'input' to slice from. + /// + /// + /// size[i] specifies the number of elements of the 'i'th dimension + /// of 'input' to slice. If size[i] is -1, all remaining elements in dimension + /// i are included in the slice (i.e. this is equivalent to setting + /// size[i] = input.dim_size(i) - begin[i]). 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Slice'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The output tensor is a tensor with dimensions described by 'size' + /// whose values are extracted from 'input' starting at the offsets in + /// 'begin'. + /// + /// *Requirements*: + /// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + /// + public static Tensor slice (Tensor input, Tensor begin, Tensor size, string name = "Slice") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["begin"] = begin; + dict["size"] = size; + var op = _op_def_lib._apply_op_helper("Slice", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that passes a sliding window over input_dataset. + /// + /// + /// + /// + /// A scalar representing the number of elements in the + /// sliding window. + /// + /// + /// A scalar representing the steps moving the sliding window + /// forward in one iteration. It must be positive. + /// + /// + /// A scalar representing the stride of the input elements of the sliding window. + /// It must be positive. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SlideDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor slide_dataset (Tensor input_dataset, Tensor window_size, Tensor window_shift, Tensor window_stride, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SlideDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["window_size"] = window_size; + dict["window_shift"] = window_shift; + dict["window_stride"] = window_stride; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("SlideDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a copy of the input tensor. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Snapshot'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor snapshot (Tensor input, string name = "Snapshot") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("Snapshot", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes softmax activations. + /// + /// + /// 2-D with shape [batch_size, num_classes]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softmax'. + /// + /// + /// Same shape as logits. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
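+ /// 
+ /// A usage sketch (illustrative; logits is assumed to be an existing 2-D 
+ /// float Tensor of shape [batch_size, num_classes]): 
+ /// 
+ ///   // Each row of probs sums to 1 across the class dimension, 
+ ///   // per the formula below. 
+ ///   var probs = gen_ops.softmax(logits, name: "probs"); 
+ /// 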
+ /// + /// + /// For each batch i and class j we have + /// + /// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + /// + public static Tensor softmax (Tensor logits, string name = "Softmax") + { + var dict = new Dictionary(); + dict["logits"] = logits; + var op = _op_def_lib._apply_op_helper("Softmax", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// batch_size x num_classes matrix + /// + /// + /// batch_size x num_classes matrix + /// The caller must ensure that each batch of labels represents a valid + /// probability distribution. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftmaxCrossEntropyWithLogits'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// loss : Per example loss (batch_size vector). + /// backprop : backpropagated gradients (batch_size x num_classes matrix). + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Inputs are the logits, not probabilities. + /// + public static (Tensor loss, Tensor backprop) softmax_cross_entropy_with_logits (Tensor features, Tensor labels, string name = "SoftmaxCrossEntropyWithLogits") + { + var dict = new Dictionary(); + dict["features"] = features; + dict["labels"] = labels; + var op = _op_def_lib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, keywords: dict); + int _idx = 0; + var loss = op.outputs[_idx++]; + var backprop = op.outputs[_idx++]; + return (loss, backprop); + } + + /// + /// Computes softplus: log(exp(features) + 1). + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softplus'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor softplus (Tensor features, string name = "Softplus") + { + var dict = new Dictionary(); + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Softplus", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes softplus gradients for a softplus operation. + /// + /// + /// The backpropagated gradients to the corresponding softplus operation. + /// + /// + /// The features passed as input to the corresponding softplus operation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftplusGrad'. + /// + /// + /// The gradients: gradients / (1 + exp(-features)). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor softplus_grad (Tensor gradients, Tensor features, string name = "SoftplusGrad") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("SoftplusGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes softsign: features / (abs(features) + 1). + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softsign'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
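+ /// 
+ /// A usage sketch (illustrative; features is any pre-existing float Tensor): 
+ /// 
+ ///   // Bounded to (-1, 1), saturating more slowly than tanh. 
+ ///   var activated = gen_ops.softsign(features); 
+ /// 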
+ /// + public static Tensor softsign (Tensor features, string name = "Softsign") + { + var dict = new Dictionary(); + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("Softsign", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes softsign gradients for a softsign operation. + /// + /// + /// The backpropagated gradients to the corresponding softsign operation. + /// + /// + /// The features passed as input to the corresponding softsign operation. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftsignGrad'. + /// + /// + /// The gradients: gradients / (1 + abs(features)) ** 2. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor softsign_grad (Tensor gradients, Tensor features, string name = "SoftsignGrad") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["features"] = features; + var op = _op_def_lib._apply_op_helper("SoftsignGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// SpaceToBatch for 4-D tensors of type T. + /// + /// + /// 4-D with shape [batch, height, width, depth]. + /// + /// + /// 2-D tensor of non-negative integers with shape [2, 2]. It specifies + /// the padding of the input with zeros across the spatial dimensions as follows: + /// + /// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + /// + /// The effective spatial dimensions of the zero-padded input tensor will be: + /// + /// height_pad = pad_top + height + pad_bottom + /// width_pad = pad_left + width + pad_right + /// + /// The attr block_size must be greater than one. It indicates the block size. + /// + /// * Non-overlapping blocks of size block_size x block size in the height and + /// width dimensions are rearranged into the batch dimension at each location. + /// * The batch of the output tensor is batch * block_size * block_size. + /// * Both height_pad and width_pad must be divisible by block_size. 
+ /// + /// The shape of the output will be: + /// + /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth] + /// + /// Some examples: + /// + /// (1) For the following input of shape [1, 2, 2, 1] and block_size of 2: + /// + /// + /// x = [[[[1], [2]], [[3], [4]]]] + /// + /// + /// The output tensor has shape [4, 1, 1, 1] and value: + /// + /// + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// + /// + /// (2) For the following input of shape [1, 2, 2, 3] and block_size of 2: + /// + /// + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// The output tensor has shape [4, 1, 1, 3] and value: + /// + /// + /// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + /// + /// + /// (3) For the following input of shape [1, 4, 4, 1] and block_size of 2: + /// + /// + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// + /// + /// The output tensor has shape [4, 2, 2, 1] and value: + /// + /// + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// + /// + /// (4) For the following input of shape [2, 2, 4, 1] and block_size of 2: + /// + /// + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// + /// + /// The output tensor has shape [8, 1, 2, 1] and value: + /// + /// + /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + /// + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToBatch'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is a legacy version of the more general SpaceToBatchND. + /// + /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + /// More specifically, this op outputs a copy of the input tensor where values from + /// the height and width dimensions are moved to the batch dimension. After + /// the zero-padding, both height and width of the input must be divisible by the + /// block size. + /// + public static Tensor space_to_batch (Tensor input, Tensor paddings, int block_size, string name = "SpaceToBatch") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["paddings"] = paddings; + dict["block_size"] = block_size; + var op = _op_def_lib._apply_op_helper("SpaceToBatch", name: name, keywords: dict); + return op.output; + } + + /// + /// SpaceToBatch for N-D tensors of type T. + /// + /// + /// N-D with shape input_shape = [batch] + spatial_shape + remaining_shape, + /// where spatial_shape has M dimensions. + /// + /// + /// 1-D with shape [M], all values must be >= 1. + /// + /// + /// 2-D with shape [M, 2], all values must be >= 0. + /// paddings[i] = [pad_start, pad_end] specifies the padding for input dimension + /// i + 1, which corresponds to spatial dimension i. It is required that + /// block_shape[i] divides input_shape[i + 1] + pad_start + pad_end. + /// + /// This operation is equivalent to the following steps: + /// + /// 1. 
Zero-pad the start and end of dimensions [1, ..., M] of the + /// input according to paddings to produce padded of shape padded_shape. + /// + /// 2. Reshape padded to reshaped_padded of shape: + /// + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1], + /// block_shape[M-1]] + + /// remaining_shape + /// + /// 3. Permute dimensions of reshaped_padded to produce + /// permuted_reshaped_padded of shape: + /// + /// block_shape + + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// 4. Reshape permuted_reshaped_padded to flatten block_shape into the batch + /// dimension, producing an output tensor of shape: + /// + /// [batch * prod(block_shape)] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// Some examples: + /// + /// (1) For the following input of shape [1, 2, 2, 1], block_shape = [2, 2], and + /// paddings = [[0, 0], [0, 0]]: + /// + /// + /// x = [[[[1], [2]], [[3], [4]]]] + /// + /// + /// The output tensor has shape [4, 1, 1, 1] and value: + /// + /// + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// + /// + /// (2) For the following input of shape [1, 2, 2, 3], block_shape = [2, 2], and + /// paddings = [[0, 0], [0, 0]]: + /// + /// + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// The output tensor has shape [4, 1, 1, 3] and value: + /// + /// + /// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + /// + /// + /// (3) For the following input of shape [1, 4, 4, 1], block_shape = [2, 2], and + /// paddings = [[0, 0], [0, 0]]: + /// + /// + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// + /// + /// The output tensor has shape [4, 2, 2, 1] and value: + /// + /// + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// + /// + /// (4) For the following input of shape [2, 2, 4, 1], block_shape = [2, 2], and + /// paddings = [[0, 0], [2, 0]]: + /// + /// + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// + /// + /// The output tensor has shape [8, 1, 3, 1] and value: + /// + /// + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToBatchND'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation divides "spatial" dimensions [1, ..., M] of the input into a + /// grid of blocks of shape block_shape, and interleaves these blocks with the + /// "batch" dimension (0) such that in the output, the spatial dimensions + /// [1, ..., M] correspond to the position within the grid, and the batch + /// dimension combines both the position within a spatial block and the original + /// batch position. 
Prior to division into blocks, the spatial dimensions of the + /// input are optionally zero padded according to paddings. See below for a + /// precise description. + /// + public static Tensor space_to_batch_n_d (Tensor input, Tensor block_shape, Tensor paddings, string name = "SpaceToBatchND") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["block_shape"] = block_shape; + dict["paddings"] = paddings; + var op = _op_def_lib._apply_op_helper("SpaceToBatchND", name: name, keywords: dict); + return op.output; + } + + /// + /// SpaceToDepth for tensors of type T. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToDepth'. + /// + /// + /// Optional argument + /// The size of the spatial block. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Rearranges blocks of spatial data, into depth. More specifically, + /// this op outputs a copy of the input tensor where values from the height + /// and width dimensions are moved to the depth dimension. + /// The attr block_size indicates the input block size. + /// + /// * Non-overlapping blocks of size block_size x block size are rearranged + /// into depth at each location. + /// * The depth of the output tensor is block_size * block_size * input_depth. + /// * The Y, X coordinates within each block of the input become the high order + /// component of the output channel index. + /// * The input tensor's height and width must be divisible by block_size. + /// + /// The data_format attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": [ batch, height, width, channels ] + /// "NCHW": [ batch, channels, height, width ] + /// "NCHW_VECT_C": + /// qint8 [ batch, channels / 4, height, width, 4 ] + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + /// within the output image, bX, bY means coordinates + /// within the input block, iC means input channels). + /// The output would be a transpose to the following layout: + /// n,oY,oX,bY,bX,iC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. + /// + /// For example, given an input of shape [1, 2, 2, 1], data_format = "NHWC" and + /// block_size = 2: + /// + /// + /// x = [[[[1], [2]], + /// [[3], [4]]]] + /// + /// + /// This operation will output a tensor of shape [1, 1, 1, 4]: + /// + /// + /// [[[[1, 2, 3, 4]]]] + /// + /// + /// Here, the input has a batch of 1 and each batch element has shape [2, 2, 1], + /// the corresponding output will have a single element (i.e. width and height are + /// both 1) and will have a depth of 4 channels (1 * block_size * block_size). + /// The output element shape is [1, 1, 4]. + /// + /// For an input tensor with larger depth, here of shape [1, 2, 2, 3], e.g. 
+ /// + /// + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// + /// This operation, for block_size of 2, will return the following tensor of shape + /// [1, 1, 1, 12] + /// + /// + /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// + /// + /// Similarly, for the following input of shape [1 4 4 1], and a block size of 2: + /// + /// + /// x = [[[[1], [2], [5], [6]], + /// [[3], [4], [7], [8]], + /// [[9], [10], [13], [14]], + /// [[11], [12], [15], [16]]]] + /// + /// + /// the operator will return the following tensor of shape [1 2 2 4]: + /// + /// + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// + /// + public static Tensor space_to_depth (Tensor input, int block_size, string data_format = null, string name = "SpaceToDepth") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["block_size"] = block_size; + if (data_format != null) + dict["data_format"] = data_format; + var op = _op_def_lib._apply_op_helper("SpaceToDepth", name: name, keywords: dict); + return op.output; + } + + /// + /// Applies a sparse gradient to a given accumulator. + /// + /// + /// The handle to a accumulator. + /// + /// + /// The local_step value at which the sparse gradient was computed. + /// + /// + /// Indices of the sparse gradient to be accumulated. Must be a + /// vector. + /// + /// + /// Values are the non-zero slices of the gradient, and must have + /// the same first dimension as indices, i.e., the nnz represented by indices and + /// values must be consistent. + /// + /// + /// Shape of the sparse gradient to be accumulated. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAccumulatorApplyGradient'. + /// + /// + /// Optional argument + /// Boolean indicating whether gradient_shape is unknown, in which + /// case the input is ignored during validation. + /// + /// + /// Returns the description of the operation + /// + /// + /// Does not add if local_step is smaller than the accumulator's + /// global_step. + /// + public static Operation sparse_accumulator_apply_gradient (Tensor handle, Tensor local_step, Tensor gradient_indices, Tensor gradient_values, Tensor gradient_shape, bool has_known_shape, string name = "SparseAccumulatorApplyGradient") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["local_step"] = local_step; + dict["gradient_indices"] = gradient_indices; + dict["gradient_values"] = gradient_values; + dict["gradient_shape"] = gradient_shape; + dict["has_known_shape"] = has_known_shape; + var op = _op_def_lib._apply_op_helper("SparseAccumulatorApplyGradient", name: name, keywords: dict); + return op; + } + + /// + /// Extracts the average sparse gradient in a SparseConditionalAccumulator. + /// + /// + /// The handle to a SparseConditionalAccumulator. + /// + /// + /// Number of gradients required before we return an aggregate. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAccumulatorTakeGradient'. + /// + /// + /// Optional argument + /// The data type of accumulated gradients. Needs to correspond to the type + /// of the accumulator. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// indices : Indices of the average of the accumulated sparse gradients. + /// values : Values of the average of the accumulated sparse gradients. + /// shape : Shape of the average of the accumulated sparse gradients. 
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. 
+ /// 
+ /// 
+ /// The op will block until sufficient (i.e., more than num_required) 
+ /// gradients have been accumulated. If the accumulator has already 
+ /// aggregated more than num_required gradients, it will return the 
+ /// average of the accumulated gradients. Also automatically increments 
+ /// the recorded global_step in the accumulator by 1, and resets the 
+ /// aggregate to 0. 
+ /// 
+ public static (Tensor indices, Tensor values, Tensor shape) sparse_accumulator_take_gradient (Tensor handle, Tensor num_required, TF_DataType dtype, string name = "SparseAccumulatorTakeGradient") 
+ { 
+ var dict = new Dictionary(); 
+ dict["handle"] = handle; 
+ dict["num_required"] = num_required; 
+ dict["dtype"] = dtype; 
+ var op = _op_def_lib._apply_op_helper("SparseAccumulatorTakeGradient", name: name, keywords: dict); 
+ int _idx = 0; 
+ var indices = op.outputs[_idx++]; 
+ var values = op.outputs[_idx++]; 
+ var shape = op.outputs[_idx++]; 
+ return (indices, values, shape); 
+ } 
+ 
+ /// 
+ /// Adds two SparseTensor objects to produce another SparseTensor. 
+ /// 
+ /// 
+ /// 2-D. The indices of the first SparseTensor, size [nnz, ndims] Matrix. 
+ /// 
+ /// 
+ /// 1-D. The values of the first SparseTensor, size [nnz] Vector. 
+ /// 
+ /// 
+ /// 1-D. The shape of the first SparseTensor, size [ndims] Vector. 
+ /// 
+ /// 
+ /// 2-D. The indices of the second SparseTensor, size [nnz, ndims] Matrix. 
+ /// 
+ /// 
+ /// 1-D. The values of the second SparseTensor, size [nnz] Vector. 
+ /// 
+ /// 
+ /// 1-D. The shape of the second SparseTensor, size [ndims] Vector. 
+ /// 
+ /// 
+ /// 0-D. The magnitude threshold that determines if an output value/index 
+ /// pair takes space. 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAdd'. 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows: 
+ /// sum_indices : 
+ /// sum_values : 
+ /// sum_shape : 
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. 
+ /// 
+ /// 
+ /// The input SparseTensor objects' indices are assumed ordered in standard 
+ /// lexicographic order. If this is not the case, before this step run 
+ /// SparseReorder to restore index ordering. 
+ /// 
+ /// By default, if two values sum to zero at some index, the output SparseTensor 
+ /// would still include that particular location in its index, storing a zero in the 
+ /// corresponding value slot. To override this, callers can specify thresh, 
+ /// indicating that if the sum has a magnitude strictly smaller than thresh, its 
+ /// corresponding value and index would then not be included. In particular, 
+ /// thresh == 0 (default) means everything is kept and actual thresholding happens 
+ /// only for a positive value. 
+ /// 
+ /// In the following shapes, nnz is the count after taking thresh into account.
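+ /// 
+ /// A call-site sketch (illustrative; the a_* / b_* tensors are assumed to be 
+ /// the index/value/shape components of two already-constructed SparseTensors, 
+ /// and thresh a 0-D tensor of the values' magnitude type): 
+ /// 
+ ///   var (sumIdx, sumVals, sumShape) = gen_ops.sparse_add( 
+ ///       a_indices, a_values, a_shape, 
+ ///       b_indices, b_values, b_shape, 
+ ///       thresh); 
+ /// 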
+ /// + public static (Tensor sum_indices, Tensor sum_values, Tensor sum_shape) sparse_add (Tensor a_indices, Tensor a_values, Tensor a_shape, Tensor b_indices, Tensor b_values, Tensor b_shape, Tensor thresh, string name = "SparseAdd") + { + var dict = new Dictionary(); + dict["a_indices"] = a_indices; + dict["a_values"] = a_values; + dict["a_shape"] = a_shape; + dict["b_indices"] = b_indices; + dict["b_values"] = b_values; + dict["b_shape"] = b_shape; + dict["thresh"] = thresh; + var op = _op_def_lib._apply_op_helper("SparseAdd", name: name, keywords: dict); + int _idx = 0; + var sum_indices = op.outputs[_idx++]; + var sum_values = op.outputs[_idx++]; + var sum_shape = op.outputs[_idx++]; + return (sum_indices, sum_values, sum_shape); + } + + /// + /// The gradient operator for the SparseAdd op. + /// + /// + /// 1-D with shape [nnz(sum)]. The gradient with respect to + /// the non-empty values of the sum. + /// + /// + /// 2-D. The indices of the SparseTensor A, size [nnz(A), ndims]. + /// + /// + /// 2-D. The indices of the SparseTensor B, size [nnz(B), ndims]. + /// + /// + /// 2-D. The indices of the sum SparseTensor, size + /// [nnz(sum), ndims]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAddGrad'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// a_val_grad : 1-D with shape [nnz(A)]. The gradient with respect to the + /// non-empty values of A. + /// b_val_grad : 1-D with shape [nnz(B)]. The gradient with respect to the + /// non-empty values of B. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// The SparseAdd op calculates A + B, where A, B, and the sum are all represented + /// as SparseTensor objects. This op takes in the upstream gradient w.r.t. + /// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty + /// values of A and B. + /// + public static (Tensor a_val_grad, Tensor b_val_grad) sparse_add_grad (Tensor backprop_val_grad, Tensor a_indices, Tensor b_indices, Tensor sum_indices, string name = "SparseAddGrad") + { + var dict = new Dictionary(); + dict["backprop_val_grad"] = backprop_val_grad; + dict["a_indices"] = a_indices; + dict["b_indices"] = b_indices; + dict["sum_indices"] = sum_indices; + var op = _op_def_lib._apply_op_helper("SparseAddGrad", name: name, keywords: dict); + int _idx = 0; + var a_val_grad = op.outputs[_idx++]; + var b_val_grad = op.outputs[_idx++]; + return (a_val_grad, b_val_grad); + } + + /// + /// var: Should be from a Variable(). + /// + /// + /// + /// + /// Should be from a Variable(). + /// + /// + /// : Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// Decay factor. Must be a scalar. + /// + /// + /// Constant factor. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdadelta'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
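+ /// 
+ /// A training-step sketch (illustrative; the first three arguments are 
+ /// placeholder variable tensors of matching shape, lr / rho / epsilon are 
+ /// scalars, and grad / indices describe a sparse gradient slice): 
+ /// 
+ ///   var updated = gen_ops.sparse_apply_adadelta( 
+ ///       theVar, accum, accumUpdate, lr, rho, epsilon, grad, indices, 
+ ///       use_locking: true); 
+ /// 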
+ /// + public static Tensor sparse_apply_adadelta (Tensor var, Tensor accum, Tensor accum_update, Tensor lr, Tensor rho, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "SparseApplyAdadelta") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["accum_update"] = accum_update; + dict["lr"] = lr; + dict["rho"] = rho; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyAdadelta", name: name, keywords: dict); + return op.output; + } + + /// + /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// $$accum += grad * grad$$ + /// $$var -= lr * grad * (1 / sqrt(accum))$$ + /// + public static Tensor sparse_apply_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor indices, bool? use_locking = null, bool? update_slots = null, string name = "SparseApplyAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (update_slots.HasValue) + dict["update_slots"] = update_slots.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyAdagrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// Training step number. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdagradDA'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor sparse_apply_adagrad_d_a (Tensor var, Tensor gradient_accumulator, Tensor gradient_squared_accumulator, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor global_step, bool? 
use_locking = null, string name = "SparseApplyAdagradDA") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["gradient_accumulator"] = gradient_accumulator; + dict["gradient_squared_accumulator"] = gradient_squared_accumulator; + dict["grad"] = grad; + dict["indices"] = indices; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["global_step"] = global_step; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyAdagradDA", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var, ms and mom. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyCenteredRMSProp'. + /// + /// + /// If True, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. + /// + /// Note that in dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in this sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + /// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + /// $$var <- var - mom$$ + /// + public static Tensor sparse_apply_centered_r_m_s_prop (Tensor var, Tensor mg, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "SparseApplyCenteredRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["mg"] = mg; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyCenteredRMSProp", name: name, keywords: dict); + return op.output; + } + + /// + /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// The gradient. 
+ /// 
+ /// 
+ /// A vector of indices into the first dimension of var and accum. 
+ /// 
+ /// 
+ /// Scaling factor. Must be a scalar. 
+ /// 
+ /// 
+ /// L1 regularization. Must be a scalar. 
+ /// 
+ /// 
+ /// L2 regularization. Must be a scalar. 
+ /// 
+ /// 
+ /// Scaling factor. Must be a scalar. 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyFtrl'. 
+ /// 
+ /// 
+ /// If True, updating of the var and accum tensors will be protected 
+ /// by a lock; otherwise the behavior is undefined, but may exhibit less 
+ /// contention. 
+ /// 
+ /// 
+ /// Same as "var". 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// 
+ /// 
+ /// That is for rows we have grad for, we update var, accum and linear as follows: 
+ /// $$accum_new = accum + grad * grad$$ 
+ /// $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$ 
+ /// $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ 
+ /// $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ 
+ /// $$accum = accum_{new}$$ 
+ /// 
+ public static Tensor sparse_apply_ftrl (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor lr_power, bool? use_locking = null, string name = "SparseApplyFtrl") 
+ { 
+ var dict = new Dictionary(); 
+ dict["var"] = var; 
+ dict["accum"] = accum; 
+ dict["linear"] = linear; 
+ dict["grad"] = grad; 
+ dict["indices"] = indices; 
+ dict["lr"] = lr; 
+ dict["l1"] = l1; 
+ dict["l2"] = l2; 
+ dict["lr_power"] = lr_power; 
+ if (use_locking.HasValue) 
+ dict["use_locking"] = use_locking.Value; 
+ var op = _op_def_lib._apply_op_helper("SparseApplyFtrl", name: name, keywords: dict); 
+ return op.output; 
+ } 
+ 
+ /// 
+ /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
+ /// 
+ /// 
+ /// Should be from a Variable(). 
+ /// 
+ /// 
+ /// Should be from a Variable(). 
+ /// 
+ /// 
+ /// Should be from a Variable(). 
+ /// 
+ /// 
+ /// The gradient. 
+ /// 
+ /// 
+ /// A vector of indices into the first dimension of var and accum. 
+ /// 
+ /// 
+ /// Scaling factor. Must be a scalar. 
+ /// 
+ /// 
+ /// L1 regularization. Must be a scalar. 
+ /// 
+ /// 
+ /// L2 shrinkage regularization. Must be a scalar. 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Scaling factor. Must be a scalar. 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyFtrlV2'. 
+ /// 
+ /// 
+ /// If True, updating of the var and accum tensors will be protected 
+ /// by a lock; otherwise the behavior is undefined, but may exhibit less 
+ /// contention. 
+ /// 
+ /// 
+ /// Same as "var". 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// 
+ /// 
+ /// That is for rows we have grad for, we update var, accum and linear as follows: 
+ /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var 
+ /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage 
+ /// linear += grad_with_shrinkage + 
+ /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 
+ /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 
+ /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 
+ /// accum = accum_new 
+ /// 
+ public static Tensor sparse_apply_ftrl_v2 (Tensor var, Tensor accum, Tensor linear, Tensor grad, Tensor indices, Tensor lr, Tensor l1, Tensor l2, Tensor l2_shrinkage, Tensor lr_power, bool?
use_locking = null, string name = "SparseApplyFtrlV2") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["linear"] = linear; + dict["grad"] = grad; + dict["indices"] = indices; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["l2_shrinkage"] = l2_shrinkage; + dict["lr_power"] = lr_power; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyFtrlV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// Momentum. Must be a scalar. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyMomentum'. + /// + /// + /// If True, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// If True, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// + /// $$accum = accum * momentum + grad$$ + /// $$var -= lr * accum$$ + /// + public static Tensor sparse_apply_momentum (Tensor var, Tensor accum, Tensor lr, Tensor grad, Tensor indices, Tensor momentum, bool? use_locking = null, bool? use_nesterov = null, string name = "SparseApplyMomentum") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["grad"] = grad; + dict["indices"] = indices; + dict["momentum"] = momentum; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + if (use_nesterov.HasValue) + dict["use_nesterov"] = use_nesterov.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyMomentum", name: name, keywords: dict); + return op.output; + } + + /// + /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Learning rate. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyProximalAdagrad'. + /// + /// + /// If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
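+ /// 
+ /// A call-site sketch (illustrative; theVar and accum are placeholder 
+ /// variable tensors, lr / l1 / l2 scalars, and grad / indices a sparse 
+ /// gradient slice; the per-row update rule is spelled out below): 
+ /// 
+ ///   var updated = gen_ops.sparse_apply_proximal_adagrad( 
+ ///       theVar, accum, lr, l1, l2, grad, indices); 
+ /// 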
+ /// + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// $$accum += grad * grad$$ + /// $$prox_v = var$$ + /// $$prox_v -= lr * grad * (1 / sqrt(accum))$$ + /// $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ + /// + public static Tensor sparse_apply_proximal_adagrad (Tensor var, Tensor accum, Tensor lr, Tensor l1, Tensor l2, Tensor grad, Tensor indices, bool? use_locking = null, string name = "SparseApplyProximalAdagrad") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["accum"] = accum; + dict["lr"] = lr; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyProximalAdagrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// L1 regularization. Must be a scalar. + /// + /// + /// L2 regularization. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var and accum. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyProximalGradientDescent'. + /// + /// + /// If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// That is for rows we have grad for, we update var as follows: + /// $$prox_v = var - alpha * grad$$ + /// $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ + /// + public static Tensor sparse_apply_proximal_gradient_descent (Tensor var, Tensor alpha, Tensor l1, Tensor l2, Tensor grad, Tensor indices, bool? use_locking = null, string name = "SparseApplyProximalGradientDescent") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["alpha"] = alpha; + dict["l1"] = l1; + dict["l2"] = l2; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyProximalGradientDescent", name: name, keywords: dict); + return op.output; + } + + /// + /// Update '*var' according to the RMSProp algorithm. + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Should be from a Variable(). + /// + /// + /// Scaling factor. Must be a scalar. + /// + /// + /// Decay rate. Must be a scalar. + /// + /// + /// + /// + /// Ridge term. Must be a scalar. + /// + /// + /// The gradient. + /// + /// + /// A vector of indices into the first dimension of var, ms and mom. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyRMSProp'. + /// + /// + /// If True, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// + /// + /// Same as "var". + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
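+ /// 
+ /// A call-site sketch (illustrative; theVar / ms / mom are placeholder 
+ /// variable tensors and the remaining arguments scalars or a sparse 
+ /// gradient slice, as documented above): 
+ /// 
+ ///   var updated = gen_ops.sparse_apply_r_m_s_prop( 
+ ///       theVar, ms, mom, lr, rho, momentum, epsilon, grad, indices, 
+ ///       use_locking: false); 
+ /// 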
+ /// + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + /// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + /// $$var <- var - mom$$ + /// + public static Tensor sparse_apply_r_m_s_prop (Tensor var, Tensor ms, Tensor mom, Tensor lr, Tensor rho, Tensor momentum, Tensor epsilon, Tensor grad, Tensor indices, bool? use_locking = null, string name = "SparseApplyRMSProp") + { + var dict = new Dictionary(); + dict["var"] = var; + dict["ms"] = ms; + dict["mom"] = mom; + dict["lr"] = lr; + dict["rho"] = rho; + dict["momentum"] = momentum; + dict["epsilon"] = epsilon; + dict["grad"] = grad; + dict["indices"] = indices; + if (use_locking.HasValue) + dict["use_locking"] = use_locking.Value; + var op = _op_def_lib._apply_op_helper("SparseApplyRMSProp", name: name, keywords: dict); + return op.output; + } + + /// + /// Concatenates a list of SparseTensor along the specified dimension. + /// + /// + /// 2-D. Indices of each input SparseTensor. + /// + /// + /// 1-D. Non-empty values of each SparseTensor. + /// + /// + /// 1-D. Shapes of each SparseTensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseConcat'. + /// + /// + /// Optional argument + /// Dimension to concatenate along. Must be in range [-rank, rank), + /// where rank is the number of dimensions in each input SparseTensor. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output_indices : 2-D. Indices of the concatenated SparseTensor. + /// output_values : 1-D. Non-empty values of the concatenated SparseTensor. + /// output_shape : 1-D. Shape of the concatenated SparseTensor. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Concatenation is with respect to the dense versions of these sparse tensors. + /// It is assumed that each input is a SparseTensor whose elements are ordered + /// along increasing dimension number. + /// + /// All inputs' shapes must match, except for the concat dimension. The + /// indices, values, and shapes lists must have the same length. + /// + /// The output shape is identical to the inputs', except along the concat + /// dimension, where it is the sum of the inputs' sizes along that dimension. + /// + /// The output elements will be resorted to preserve the sort order along + /// increasing dimension number. + /// + /// This op runs in O(M log M) time, where M is the total number of non-empty + /// values across all inputs. This is due to the need for an internal sort in + /// order to concatenate efficiently across an arbitrary dimension. 
+ /// + /// For example, if concat_dim = 1 and the inputs are + /// + /// sp_inputs[0]: shape = [2, 3] + /// [0, 2]: "a" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// sp_inputs[1]: shape = [2, 4] + /// [0, 1]: "d" + /// [0, 2]: "e" + /// + /// then the output will be + /// + /// shape = [2, 7] + /// [0, 2]: "a" + /// [0, 4]: "d" + /// [0, 5]: "e" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// Graphically this is equivalent to doing + /// + /// [ a] concat [ d e ] = [ a d e ] + /// [b c ] [ ] [b c ] + /// + public static (Tensor output_indices, Tensor output_values, Tensor output_shape) sparse_concat (Tensor[] indices, Tensor[] values, Tensor[] shapes, int concat_dim, string name = "SparseConcat") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["values"] = values; + dict["shapes"] = shapes; + dict["concat_dim"] = concat_dim; + var op = _op_def_lib._apply_op_helper("SparseConcat", name: name, keywords: dict); + int _idx = 0; + var output_indices = op.outputs[_idx++]; + var output_values = op.outputs[_idx++]; + var output_shape = op.outputs[_idx++]; + return (output_indices, output_values, output_shape); + } + + /// + /// A conditional accumulator for aggregating sparse gradients. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseConditionalAccumulator'. + /// + /// + /// Optional argument + /// The type of the value being accumulated. + /// + /// + /// Optional argument + /// The shape of the values. + /// + /// + /// If non-empty, this accumulator is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this accumulator will be shared under the given name + /// across multiple sessions. + /// + /// + /// The handle to the accumulator. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The accumulator accepts gradients marked with local_step greater or + /// equal to the most recent global_step known to the accumulator. The + /// average can be extracted from the accumulator, provided sufficient + /// gradients have been accumulated. Extracting the average automatically + /// resets the aggregate to 0, and increments the global_step recorded by + /// the accumulator. + /// + public static Tensor sparse_conditional_accumulator (TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "SparseConditionalAccumulator") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("SparseConditionalAccumulator", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates sparse cross from a list of sparse and dense tensors. + /// + /// + /// 2-D. Indices of each input SparseTensor. + /// + /// + /// 1-D. values of each SparseTensor. + /// + /// + /// 1-D. Shapes of each SparseTensor. + /// + /// + /// 2-D. Columns represented by dense Tensor. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseCross'. + /// + /// + /// Optional argument + /// If true, returns the hash of the cross instead of the string. + /// This will allow us avoiding string manipulations. + /// + /// + /// Optional argument + /// It is used if hashed_output is true. 
+ /// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. + /// + /// + /// Optional argument + /// Specify the hash_key that will be used by the FingerprintCat64 + /// function to combine the crosses fingerprints. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// Returns a tuple with multiple values, as follows: + /// output_indices : 2-D. Indices of the concatenated SparseTensor. + /// output_values : 1-D. Non-empty values of the concatenated or hashed + /// SparseTensor. + /// output_shape : 1-D. Shape of the concatenated SparseTensor. + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// The op takes two lists, one of 2D SparseTensor and one of 2D Tensor, each + /// representing features of one feature column. It outputs a 2D SparseTensor with + /// the batchwise crosses of these features. + /// + /// For example, if the inputs are + /// + /// inputs[0]: SparseTensor with shape = [2, 2] + /// [0, 0]: "a" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// inputs[1]: SparseTensor with shape = [2, 1] + /// [0, 0]: "d" + /// [1, 0]: "e" + /// + /// inputs[2]: Tensor [["f"], ["g"]] + /// + /// then the output will be + /// + /// shape = [2, 2] + /// [0, 0]: "a_X_d_X_f" + /// [1, 0]: "b_X_e_X_g" + /// [1, 1]: "c_X_e_X_g" + /// + /// if hashed_output=true then the output will be + /// + /// shape = [2, 2] + /// [0, 0]: FingerprintCat64( + /// Fingerprint64("f"), FingerprintCat64( + /// Fingerprint64("d"), Fingerprint64("a"))) + /// [1, 0]: FingerprintCat64( + /// Fingerprint64("g"), FingerprintCat64( + /// Fingerprint64("e"), Fingerprint64("b"))) + /// [1, 1]: FingerprintCat64( + /// Fingerprint64("g"), FingerprintCat64( + /// Fingerprint64("e"), Fingerprint64("c"))) + /// + public static (Tensor output_indices, Tensor output_values, Tensor output_shape) sparse_cross (Tensor[] indices, Tensor[] values, Tensor[] shapes, Tensor[] dense_inputs, bool hashed_output, int num_buckets, int hash_key, TF_DataType out_type, TF_DataType internal_type, string name = "SparseCross") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["values"] = values; + dict["shapes"] = shapes; + dict["dense_inputs"] = dense_inputs; + dict["hashed_output"] = hashed_output; + dict["num_buckets"] = num_buckets; + dict["hash_key"] = hash_key; + dict["out_type"] = out_type; + dict["internal_type"] = internal_type; + var op = _op_def_lib._apply_op_helper("SparseCross", name: name, keywords: dict); + int _idx = 0; + var output_indices = op.outputs[_idx++]; + var output_values = op.outputs[_idx++]; + var output_shape = op.outputs[_idx++]; + return (output_indices, output_values, output_shape); + } + + /// + /// Adds up a SparseTensor and a dense Tensor, using these special rules: + /// + /// + /// 2-D. N x R matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// + /// + /// 1-D. N non-empty values corresponding to sp_indices. + /// + /// + /// 1-D. Shape of the input SparseTensor. + /// + /// + /// R-D. The dense Tensor operand. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseAdd'. + /// + /// + /// 1-D. The N values that are operated on. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
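+ /// 
+ /// A call-site sketch (illustrative; sp_indices / sp_values / sp_shape are 
+ /// the components of an existing SparseTensor and dense is a dense Tensor 
+ /// eligible for the broadcasting rules below): 
+ /// 
+ ///   var outVals = gen_ops.sparse_dense_cwise_add( 
+ ///       sp_indices, sp_values, sp_shape, dense); 
+ /// 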
+ ///
+ ///
+ /// (1) Broadcasts the dense side to have the same shape as the sparse side, if
+ /// eligible;
+ /// (2) Then, only the dense values pointed to by the indices of the SparseTensor
+ /// participate in the cwise addition.
+ ///
+ /// By these rules, the result is a logical SparseTensor with exactly the same
+ /// indices and shape, but possibly with different non-zero values. The output of
+ /// this Op is the resultant non-zero values.
+ ///
+ public static Tensor sparse_dense_cwise_add (Tensor sp_indices, Tensor sp_values, Tensor sp_shape, Tensor dense, string name = "SparseDenseCwiseAdd")
+ {
+ var dict = new Dictionary();
+ dict["sp_indices"] = sp_indices;
+ dict["sp_values"] = sp_values;
+ dict["sp_shape"] = sp_shape;
+ dict["dense"] = dense;
+ var op = _op_def_lib._apply_op_helper("SparseDenseCwiseAdd", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Component-wise divides a SparseTensor by a dense Tensor.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to sp_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// R-D. The dense Tensor operand.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseDiv'.
+ ///
+ ///
+ /// 1-D. The N values that are operated on.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+ /// the other direction.
+ ///
+ public static Tensor sparse_dense_cwise_div (Tensor sp_indices, Tensor sp_values, Tensor sp_shape, Tensor dense, string name = "SparseDenseCwiseDiv")
+ {
+ var dict = new Dictionary();
+ dict["sp_indices"] = sp_indices;
+ dict["sp_values"] = sp_values;
+ dict["sp_shape"] = sp_shape;
+ dict["dense"] = dense;
+ var op = _op_def_lib._apply_op_helper("SparseDenseCwiseDiv", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Component-wise multiplies a SparseTensor by a dense Tensor.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to sp_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// R-D. The dense Tensor operand.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseMul'.
+ ///
+ ///
+ /// 1-D. The N values that are operated on.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The output locations corresponding to the implicitly zero elements in the sparse
+ /// tensor will be zero (i.e., will not take up storage space), regardless of the
+ /// contents of the dense tensor (even if it is +/-INF, and even though INF*0 == NaN).
+ ///
+ /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+ /// the other direction.
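+ ///
+ /// A minimal usage sketch (illustrative only; the tf.constant-style tensor
+ /// helpers below are assumed for the example and are not defined in this file):
+ ///
+ /// // SparseTensor of dense shape [2, 2] with 2 at [0, 0] and 3 at [1, 1].
+ /// var sp_indices = tf.constant(new long[,] { { 0, 0 }, { 1, 1 } });
+ /// var sp_values = tf.constant(new float[] { 2f, 3f });
+ /// var sp_shape = tf.constant(new long[] { 2, 2 });
+ /// var dense = tf.constant(new float[,] { { 10f, 20f }, { 30f, 40f } });
+ /// // Only the dense values at the sparse indices participate:
+ /// // result values == [2 * 10, 3 * 40] == [20, 120].
+ /// var result = gen_ops.sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense);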
+ ///
+ public static Tensor sparse_dense_cwise_mul (Tensor sp_indices, Tensor sp_values, Tensor sp_shape, Tensor dense, string name = "SparseDenseCwiseMul")
+ {
+ var dict = new Dictionary();
+ dict["sp_indices"] = sp_indices;
+ dict["sp_values"] = sp_values;
+ dict["sp_shape"] = sp_shape;
+ dict["dense"] = dense;
+ var op = _op_def_lib._apply_op_helper("SparseDenseCwiseMul", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Fills empty rows in the input 2-D SparseTensor with a default value.
+ ///
+ ///
+ /// 2-D. the indices of the sparse tensor.
+ ///
+ ///
+ /// 1-D. the values of the sparse tensor.
+ ///
+ ///
+ /// 1-D. the shape of the sparse tensor.
+ ///
+ ///
+ /// 0-D. default value to insert into location [row, 0, ..., 0]
+ /// for rows missing from the input sparse tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseFillEmptyRows'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : 2-D. the indices of the filled sparse tensor.
+ /// output_values : 1-D. the values of the filled sparse tensor.
+ /// empty_row_indicator : 1-D. whether the dense row was missing in the
+ /// input sparse tensor.
+ /// reverse_index_map : 1-D. a map from the input indices to the output indices.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// The input SparseTensor is represented via the tuple of inputs
+ /// (indices, values, dense_shape). The output SparseTensor has the
+ /// same dense_shape but with indices output_indices and values
+ /// output_values.
+ ///
+ /// This op inserts a single entry for every row that doesn't have any values.
+ /// The index is created as [row, 0, ..., 0] and the inserted value
+ /// is default_value.
+ ///
+ /// For example, suppose sp_input has shape [5, 6] and non-empty values:
+ ///
+ /// [0, 1]: a
+ /// [0, 3]: b
+ /// [2, 0]: c
+ /// [3, 1]: d
+ ///
+ /// Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values:
+ ///
+ /// [0, 1]: a
+ /// [0, 3]: b
+ /// [1, 0]: default_value
+ /// [2, 0]: c
+ /// [3, 1]: d
+ /// [4, 0]: default_value
+ ///
+ /// The output SparseTensor will be in row-major order and will have the
+ /// same shape as the input.
+ ///
+ /// This op also returns an indicator vector shaped [dense_shape[0]] such that
+ ///
+ /// empty_row_indicator[i] = True iff row i was an empty row.
+ ///
+ /// And a reverse index map vector shaped [indices.shape[0]] that is used during
+ /// backpropagation,
+ ///
+ /// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
+ ///
+ public static (Tensor output_indices, Tensor output_values, Tensor empty_row_indicator, Tensor reverse_index_map) sparse_fill_empty_rows (Tensor indices, Tensor values, Tensor dense_shape, Tensor default_value, string name = "SparseFillEmptyRows")
+ {
+ var dict = new Dictionary();
+ dict["indices"] = indices;
+ dict["values"] = values;
+ dict["dense_shape"] = dense_shape;
+ dict["default_value"] = default_value;
+ var op = _op_def_lib._apply_op_helper("SparseFillEmptyRows", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ var empty_row_indicator = op.outputs[_idx++];
+ var reverse_index_map = op.outputs[_idx++];
+ return (output_indices, output_values, empty_row_indicator, reverse_index_map);
+ }
+
+ ///
+ /// The gradient of SparseFillEmptyRows.
+ ///
+ ///
+ /// 1-D. The reverse index map from SparseFillEmptyRows.
+ ///
+ ///
+ /// 1-D. The gradients from backprop.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseFillEmptyRowsGrad'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// d_values : 1-D. The backprop into values.
+ /// d_default_value : 0-D. The backprop into default_value.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Takes vectors reverse_index_map, shaped [N], and grad_values,
+ /// shaped [N_full], where N_full >= N, and copies data into either
+ /// d_values or d_default_value. Here d_values is shaped [N] and
+ /// d_default_value is a scalar.
+ ///
+ /// d_values[j] = grad_values[reverse_index_map[j]]
+ /// d_default_value = sum_{k : 0 .. N_full - 1} (
+ /// grad_values[k] * 1{k not in reverse_index_map})
+ ///
+ public static (Tensor d_values, Tensor d_default_value) sparse_fill_empty_rows_grad (Tensor reverse_index_map, Tensor grad_values, string name = "SparseFillEmptyRowsGrad")
+ {
+ var dict = new Dictionary();
+ dict["reverse_index_map"] = reverse_index_map;
+ dict["grad_values"] = grad_values;
+ var op = _op_def_lib._apply_op_helper("SparseFillEmptyRowsGrad", name: name, keywords: dict);
+ int _idx = 0;
+ var d_values = op.outputs[_idx++];
+ var d_default_value = op.outputs[_idx++];
+ return (d_values, d_default_value);
+ }
+
+ ///
+ /// Multiply matrix "a" by matrix "b".
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseMatMul'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The inputs must be two-dimensional matrices and the inner dimension of "a" must
+ /// match the outer dimension of "b". Both "a" and "b" must be Tensors, not
+ /// SparseTensors. This op is optimized for the case where at least one of "a" or
+ /// "b" is sparse, in the sense that they have a large proportion of zero values.
+ /// The breakeven for using this versus a dense matrix multiply on one platform was
+ /// 30% zero values in the sparse matrix.
+ ///
+ /// The gradient computation of this operation will only take advantage of sparsity
+ /// in the input gradient when that gradient comes from a Relu.
+ ///
+ public static Tensor sparse_mat_mul (Tensor a, Tensor b, bool? transpose_a = null, bool? transpose_b = null, bool? a_is_sparse = null, bool? b_is_sparse = null, string name = "SparseMatMul")
+ {
+ var dict = new Dictionary();
+ dict["a"] = a;
+ dict["b"] = b;
+ if (transpose_a.HasValue)
+ dict["transpose_a"] = transpose_a.Value;
+ if (transpose_b.HasValue)
+ dict["transpose_b"] = transpose_b.Value;
+ if (a_is_sparse.HasValue)
+ dict["a_is_sparse"] = a_is_sparse.Value;
+ if (b_is_sparse.HasValue)
+ dict["b_is_sparse"] = b_is_sparse.Value;
+ var op = _op_def_lib._apply_op_helper("SparseMatMul", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the max of elements across dimensions of a SparseTensor.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to input_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// 1-D. Length-K vector containing the reduction axes.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceMax'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// R-K-D. The reduced Tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This Op takes a SparseTensor and is the sparse counterpart to
+ /// tf.reduce_max(). In particular, this Op also returns a dense Tensor
+ /// instead of a sparse one.
+ ///
+ /// Reduces sp_input along the dimensions given in reduction_axes. Unless
+ /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in
+ /// reduction_axes. If keep_dims is true, the reduced dimensions are retained
+ /// with length 1.
+ ///
+ /// If reduction_axes has no entries, all dimensions are reduced, and a tensor
+ /// with a single element is returned. Additionally, the axes can be negative,
+ /// which are interpreted according to the indexing rules in Python.
+ ///
+ public static Tensor sparse_reduce_max (Tensor input_indices, Tensor input_values, Tensor input_shape, Tensor reduction_axes, bool? keep_dims = null, string name = "SparseReduceMax")
+ {
+ var dict = new Dictionary();
+ dict["input_indices"] = input_indices;
+ dict["input_values"] = input_values;
+ dict["input_shape"] = input_shape;
+ dict["reduction_axes"] = reduction_axes;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("SparseReduceMax", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the max of elements across dimensions of a SparseTensor.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to input_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// 1-D. Length-K vector containing the reduction axes.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceMaxSparse'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices :
+ /// output_values :
+ /// output_shape :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// This Op takes a SparseTensor and is the sparse counterpart to
+ /// tf.reduce_max().
In contrast to SparseReduceMax, this Op returns a + /// SparseTensor. + /// + /// Reduces sp_input along the dimensions given in reduction_axes. Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// reduction_axes. If keep_dims is true, the reduced dimensions are retained + /// with length 1. + /// + /// If reduction_axes has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + public static (Tensor output_indices, Tensor output_values, Tensor output_shape) sparse_reduce_max_sparse (Tensor input_indices, Tensor input_values, Tensor input_shape, Tensor reduction_axes, bool? keep_dims = null, string name = "SparseReduceMaxSparse") + { + var dict = new Dictionary(); + dict["input_indices"] = input_indices; + dict["input_values"] = input_values; + dict["input_shape"] = input_shape; + dict["reduction_axes"] = reduction_axes; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + var op = _op_def_lib._apply_op_helper("SparseReduceMaxSparse", name: name, keywords: dict); + int _idx = 0; + var output_indices = op.outputs[_idx++]; + var output_values = op.outputs[_idx++]; + var output_shape = op.outputs[_idx++]; + return (output_indices, output_values, output_shape); + } + + /// + /// Computes the sum of elements across dimensions of a SparseTensor. + /// + /// + /// 2-D. N x R matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// + /// + /// 1-D. N non-empty values corresponding to input_indices. + /// + /// + /// 1-D. Shape of the input SparseTensor. + /// + /// + /// 1-D. Length-K vector containing the reduction axes. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceSum'. + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// R-K-D. The reduced Tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This Op takes a SparseTensor and is the sparse counterpart to + /// tf.reduce_sum(). In particular, this Op also returns a dense Tensor + /// instead of a sparse one. + /// + /// Reduces sp_input along the dimensions given in reduction_axes. Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// reduction_axes. If keep_dims is true, the reduced dimensions are retained + /// with length 1. + /// + /// If reduction_axes has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + public static Tensor sparse_reduce_sum (Tensor input_indices, Tensor input_values, Tensor input_shape, Tensor reduction_axes, bool? keep_dims = null, string name = "SparseReduceSum") + { + var dict = new Dictionary(); + dict["input_indices"] = input_indices; + dict["input_values"] = input_values; + dict["input_shape"] = input_shape; + dict["reduction_axes"] = reduction_axes; + if (keep_dims.HasValue) + dict["keep_dims"] = keep_dims.Value; + var op = _op_def_lib._apply_op_helper("SparseReduceSum", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the sum of elements across dimensions of a SparseTensor. + /// + /// + /// 2-D. 
N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to input_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// 1-D. Length-K vector containing the reduction axes.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceSumSparse'.
+ ///
+ ///
+ /// If true, retain reduced dimensions with length 1.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices :
+ /// output_values :
+ /// output_shape :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// This Op takes a SparseTensor and is the sparse counterpart to
+ /// tf.reduce_sum(). In contrast to SparseReduceSum, this Op returns a
+ /// SparseTensor.
+ ///
+ /// Reduces sp_input along the dimensions given in reduction_axes. Unless
+ /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in
+ /// reduction_axes. If keep_dims is true, the reduced dimensions are retained
+ /// with length 1.
+ ///
+ /// If reduction_axes has no entries, all dimensions are reduced, and a tensor
+ /// with a single element is returned. Additionally, the axes can be negative,
+ /// which are interpreted according to the indexing rules in Python.
+ ///
+ public static (Tensor output_indices, Tensor output_values, Tensor output_shape) sparse_reduce_sum_sparse (Tensor input_indices, Tensor input_values, Tensor input_shape, Tensor reduction_axes, bool? keep_dims = null, string name = "SparseReduceSumSparse")
+ {
+ var dict = new Dictionary();
+ dict["input_indices"] = input_indices;
+ dict["input_values"] = input_values;
+ dict["input_shape"] = input_shape;
+ dict["reduction_axes"] = reduction_axes;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("SparseReduceSumSparse", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ var output_shape = op.outputs[_idx++];
+ return (output_indices, output_values, output_shape);
+ }
+
+ ///
+ /// Reorders a SparseTensor into the canonical, row-major ordering.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, possibly not in canonical ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to input_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReorder'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : 2-D. N x R matrix with the same indices as input_indices, but
+ /// in canonical row-major ordering.
+ /// output_values : 1-D. N non-empty values corresponding to output_indices.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Note that by convention, all sparse ops preserve the canonical ordering along
+ /// increasing dimension number. The only time ordering can be violated is during
+ /// manual manipulation of the indices and values vectors to add entries.
+ ///
+ /// Reordering does not affect the shape of the SparseTensor.
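+ ///
+ /// An illustrative usage sketch (the tf.constant-style tensor helpers below
+ /// are assumed for brevity and are not defined in this file):
+ ///
+ /// // Indices [[0, 3], [0, 1]] are not in row-major order.
+ /// var in_indices = tf.constant(new long[,] { { 0, 3 }, { 0, 1 } });
+ /// var in_values = tf.constant(new int[] { 30, 10 });
+ /// var in_shape = tf.constant(new long[] { 1, 4 });
+ /// // Expected: out_indices == [[0, 1], [0, 3]], out_values == [10, 30].
+ /// var (out_indices, out_values) = gen_ops.sparse_reorder(in_indices, in_values, in_shape);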
+ ///
+ /// If the tensor has rank R and N non-empty values, input_indices has
+ /// shape [N, R], input_values has length N, and input_shape has length R.
+ ///
+ public static (Tensor output_indices, Tensor output_values) sparse_reorder (Tensor input_indices, Tensor input_values, Tensor input_shape, string name = "SparseReorder")
+ {
+ var dict = new Dictionary();
+ dict["input_indices"] = input_indices;
+ dict["input_values"] = input_values;
+ dict["input_shape"] = input_shape;
+ var op = _op_def_lib._apply_op_helper("SparseReorder", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ return (output_indices, output_values);
+ }
+
+ ///
+ /// Reshapes a SparseTensor to represent values in a new dense shape.
+ ///
+ ///
+ /// 2-D. N x R_in matrix with the indices of non-empty values in a
+ /// SparseTensor.
+ ///
+ ///
+ /// 1-D. R_in vector with the input SparseTensor's dense shape.
+ ///
+ ///
+ /// 1-D. R_out vector with the requested new dense shape.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReshape'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : 2-D. N x R_out matrix with the updated indices of non-empty
+ /// values in the output SparseTensor.
+ /// output_shape : 1-D. R_out vector with the full dense shape of the output
+ /// SparseTensor. This is the same as new_shape but with any -1 dimensions
+ /// filled in.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// This operation has the same semantics as reshape on the represented dense
+ /// tensor. The input_indices are recomputed based on the requested new_shape.
+ ///
+ /// If one component of new_shape is the special value -1, the size of that
+ /// dimension is computed so that the total dense size remains constant. At
+ /// most one component of new_shape can be -1. The number of dense elements
+ /// implied by new_shape must be the same as the number of dense elements
+ /// originally implied by input_shape.
+ ///
+ /// Reshaping does not affect the order of values in the SparseTensor.
+ ///
+ /// If the input tensor has rank R_in and N non-empty values, and new_shape
+ /// has length R_out, then input_indices has shape [N, R_in],
+ /// input_shape has length R_in, output_indices has shape [N, R_out], and
+ /// output_shape has length R_out.
+ ///
+ public static (Tensor output_indices, Tensor output_shape) sparse_reshape (Tensor input_indices, Tensor input_shape, Tensor new_shape, string name = "SparseReshape")
+ {
+ var dict = new Dictionary();
+ dict["input_indices"] = input_indices;
+ dict["input_shape"] = input_shape;
+ dict["new_shape"] = new_shape;
+ var op = _op_def_lib._apply_op_helper("SparseReshape", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_shape = op.outputs[_idx++];
+ return (output_indices, output_shape);
+ }
+
+ ///
+ /// Computes the mean along sparse segments of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMean'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which
+ /// has size k, the number of segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ ///
+ /// Like SegmentMean, but segment_ids can have rank less than data's first
+ /// dimension, selecting a subset of dimension 0, specified by indices.
+ ///
+ public static Tensor sparse_segment_mean (Tensor data, Tensor indices, Tensor segment_ids, string name = "SparseSegmentMean")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentMean", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes gradients for SparseSegmentMean.
+ ///
+ ///
+ /// gradient propagated to the SparseSegmentMean op.
+ ///
+ ///
+ /// indices passed to the corresponding SparseSegmentMean op.
+ ///
+ ///
+ /// segment_ids passed to the corresponding SparseSegmentMean op.
+ ///
+ ///
+ /// dimension 0 of "data" passed to SparseSegmentMean op.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMeanGrad'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+ /// value is output_dim0.
+ ///
+ public static Tensor sparse_segment_mean_grad (Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name = "SparseSegmentMeanGrad")
+ {
+ var dict = new Dictionary();
+ dict["grad"] = grad;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ dict["output_dim0"] = output_dim0;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentMeanGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the mean along sparse segments of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// Should equal the number of distinct segment IDs.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMeanWithNumSegments'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which has size
+ /// num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Like SparseSegmentMean, but allows missing ids in segment_ids. If an id is
+ /// missing, the output tensor at that position will be zeroed.
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ ///
+ public static Tensor sparse_segment_mean_with_num_segments (Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name = "SparseSegmentMeanWithNumSegments")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentMeanWithNumSegments", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtN'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which
+ /// has size k, the number of segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// N is the size of the segment being reduced.
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ ///
+ public static Tensor sparse_segment_sqrt_n (Tensor data, Tensor indices, Tensor segment_ids, string name = "SparseSegmentSqrtN")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentSqrtN", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes gradients for SparseSegmentSqrtN.
+ ///
+ ///
+ /// gradient propagated to the SparseSegmentSqrtN op.
+ ///
+ ///
+ /// indices passed to the corresponding SparseSegmentSqrtN op.
+ ///
+ ///
+ /// segment_ids passed to the corresponding SparseSegmentSqrtN op.
+ ///
+ ///
+ /// dimension 0 of "data" passed to SparseSegmentSqrtN op.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtNGrad'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+ /// value is output_dim0.
+ ///
+ public static Tensor sparse_segment_sqrt_n_grad (Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name = "SparseSegmentSqrtNGrad")
+ {
+ var dict = new Dictionary();
+ dict["grad"] = grad;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ dict["output_dim0"] = output_dim0;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentSqrtNGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
+ ///
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// Should equal the number of distinct segment IDs.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtNWithNumSegments'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which
+ /// has size k, the number of segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// N is the size of the segment being reduced.
+ ///
+ /// Like SparseSegmentSqrtN, but allows missing ids in segment_ids. If an id is
+ /// missing, the output tensor at that position will be zeroed.
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
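+ ///
+ /// A small worked sketch (illustrative only; the tf.constant-style helpers
+ /// are assumed and not defined in this file):
+ ///
+ /// var data = tf.constant(new float[,] { { 1f, 2f }, { 3f, 4f } });
+ /// // Rows 0 and 1 both land in segment 0, so with num_segments = 2:
+ /// // output[0] == (row0 + row1) / sqrt(2), output[1] == [0, 0].
+ /// var output = gen_ops.sparse_segment_sqrt_n_with_num_segments(
+ ///     data,
+ ///     tf.constant(new int[] { 0, 1 }),   // indices
+ ///     tf.constant(new int[] { 0, 0 }),   // segment_ids
+ ///     tf.constant(2));                   // num_segments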
+ ///
+ public static Tensor sparse_segment_sqrt_n_with_num_segments (Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name = "SparseSegmentSqrtNWithNumSegments")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentSqrtNWithNumSegments", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the sum along sparse segments of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSum'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which
+ /// has size k, the number of segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ ///
+ /// Like SegmentSum, but segment_ids can have rank less than data's first
+ /// dimension, selecting a subset of dimension 0, specified by indices.
+ ///
+ /// For example:
+ ///
+ ///
+ /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+ ///
+ /// # Select two rows, one segment.
+ /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+ /// # => [[0 0 0 0]]
+ ///
+ /// # Select two rows, two segments.
+ /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+ /// # => [[ 1 2 3 4]
+ /// # [-1 -2 -3 -4]]
+ ///
+ /// # Select all rows, two segments.
+ /// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+ /// # => [[0 0 0 0]
+ /// # [5 6 7 8]]
+ ///
+ /// # Which is equivalent to:
+ /// tf.segment_sum(c, tf.constant([0, 0, 1]))
+ ///
+ ///
+ public static Tensor sparse_segment_sum (Tensor data, Tensor indices, Tensor segment_ids, string name = "SparseSegmentSum")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentSum", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the sum along sparse segments of a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// A 1-D tensor. Has same rank as segment_ids.
+ ///
+ ///
+ /// A 1-D tensor. Values should be sorted and can be repeated.
+ ///
+ ///
+ /// Should equal the number of distinct segment IDs.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSumWithNumSegments'.
+ ///
+ ///
+ /// Has same shape as data, except for dimension 0 which
+ /// has size num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Like SparseSegmentSum, but allows missing ids in segment_ids. If an id is
+ /// missing, the output tensor at that position will be zeroed.
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ ///
+ /// For example:
+ ///
+ ///
+ /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+ ///
+ /// tf.sparse_segment_sum_with_num_segments(
+ /// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
+ /// # => [[0 0 0 0]
+ /// # [0 0 0 0]
+ /// # [0 0 0 0]]
+ ///
+ /// tf.sparse_segment_sum_with_num_segments(c,
+ /// tf.constant([0, 1]),
+ /// tf.constant([0, 2]),
+ /// num_segments=4)
+ /// # => [[ 1 2 3 4]
+ /// # [ 0 0 0 0]
+ /// # [-1 -2 -3 -4]
+ /// # [ 0 0 0 0]]
+ ///
+ ///
+ public static Tensor sparse_segment_sum_with_num_segments (Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name = "SparseSegmentSumWithNumSegments")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["indices"] = indices;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("SparseSegmentSumWithNumSegments", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Slice a SparseTensor based on the start and size.
+ ///
+ ///
+ /// 2-D tensor representing the indices of the sparse tensor.
+ ///
+ ///
+ /// 1-D tensor representing the values of the sparse tensor.
+ ///
+ ///
+ /// 1-D. tensor representing the shape of the sparse tensor.
+ ///
+ ///
+ /// 1-D. tensor representing the start of the slice.
+ ///
+ ///
+ /// 1-D. tensor representing the size of the slice.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSlice'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : A list of 1-D tensors representing the indices of the output
+ /// sparse tensors.
+ /// output_values : A list of 1-D tensors representing the values of the output sparse
+ /// tensors.
+ /// output_shape : A list of 1-D tensors representing the shape of the output sparse
+ /// tensors.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// For example, if the input is
+ ///
+ /// input_tensor = shape = [2, 7]
+ /// [ a d e ]
+ /// [b c ]
+ ///
+ /// Graphically the output tensors are:
+ ///
+ /// sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
+ /// [ a ]
+ /// [b c ]
+ ///
+ /// sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
+ /// [ d e ]
+ /// [ ]
+ ///
+ public static (Tensor output_indices, Tensor output_values, Tensor output_shape) sparse_slice (Tensor indices, Tensor values, Tensor shape, Tensor start, Tensor size, string name = "SparseSlice")
+ {
+ var dict = new Dictionary();
+ dict["indices"] = indices;
+ dict["values"] = values;
+ dict["shape"] = shape;
+ dict["start"] = start;
+ dict["size"] = size;
+ var op = _op_def_lib._apply_op_helper("SparseSlice", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ var output_shape = op.outputs[_idx++];
+ return (output_indices, output_values, output_shape);
+ }
+
+ ///
+ /// The gradient operator for the SparseSlice op.
+ ///
+ ///
+ /// 1-D. The gradient with respect to
+ /// the non-empty values of the sliced SparseTensor.
+ ///
+ ///
+ /// 2-D. The indices of the input SparseTensor.
+ ///
+ ///
+ /// 1-D. tensor representing the start of the slice.
+ ///
+ ///
+ /// 2-D. The indices of the sliced SparseTensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSliceGrad'.
+ ///
+ ///
+ /// 1-D. The gradient with respect to the non-empty values of input SparseTensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This op takes in the upstream gradient w.r.t. non-empty values of
+ /// the sliced SparseTensor, and outputs the gradients w.r.t.
+ /// the non-empty values of input SparseTensor.
+ ///
+ public static Tensor sparse_slice_grad (Tensor backprop_val_grad, Tensor input_indices, Tensor input_start, Tensor output_indices, string name = "SparseSliceGrad")
+ {
+ var dict = new Dictionary();
+ dict["backprop_val_grad"] = backprop_val_grad;
+ dict["input_indices"] = input_indices;
+ dict["input_start"] = input_start;
+ dict["output_indices"] = output_indices;
+ var op = _op_def_lib._apply_op_helper("SparseSliceGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Applies softmax to a batched N-D SparseTensor.
+ ///
+ ///
+ /// 2-D. NNZ x R matrix with the indices of non-empty values in a
+ /// SparseTensor, in canonical ordering.
+ ///
+ ///
+ /// 1-D. NNZ non-empty values corresponding to sp_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmax'.
+ ///
+ ///
+ /// 1-D. The NNZ values for the result SparseTensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The inputs represent an N-D SparseTensor with logical shape [..., B, C]
+ /// (where N >= 2), and with indices sorted in the canonical lexicographic order.
+ ///
+ /// This op is equivalent to applying the normal tf.nn.softmax() to each innermost
+ /// logical submatrix with shape [B, C], but with the catch that *the implicitly
+ /// zero elements do not participate*. Specifically, the algorithm is equivalent
+ /// to the following:
+ ///
+ /// (1) Applies tf.nn.softmax() to a densified view of each innermost submatrix
+ /// with shape [B, C], along the size-C dimension;
+ /// (2) Masks out the original implicitly-zero locations;
+ /// (3) Renormalizes the remaining elements.
+ ///
+ /// Hence, the SparseTensor result has exactly the same non-zero indices and
+ /// shape.
+ ///
+ public static Tensor sparse_softmax (Tensor sp_indices, Tensor sp_values, Tensor sp_shape, string name = "SparseSoftmax")
+ {
+ var dict = new Dictionary();
+ dict["sp_indices"] = sp_indices;
+ dict["sp_values"] = sp_values;
+ dict["sp_shape"] = sp_shape;
+ var op = _op_def_lib._apply_op_helper("SparseSoftmax", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes softmax cross entropy cost and gradients to backpropagate.
+ ///
+ ///
+ /// batch_size x num_classes matrix
+ ///
+ ///
+ /// batch_size vector with values in [0, num_classes).
+ /// This is the label for the given minibatch entry.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// loss : Per example loss (batch_size vector).
+ /// backprop : backpropagated gradients (batch_size x num_classes matrix).
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept
+ /// a matrix of label probabilities, but rather a single label per row
+ /// of features. This label is considered to have probability 1.0 for the
+ /// given row.
+ ///
+ /// Inputs are the logits, not probabilities.
+ ///
+ public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits (Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
+ {
+ var dict = new Dictionary();
+ dict["features"] = features;
+ dict["labels"] = labels;
+ var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, keywords: dict);
+ int _idx = 0;
+ var loss = op.outputs[_idx++];
+ var backprop = op.outputs[_idx++];
+ return (loss, backprop);
+ }
+
+ ///
+ /// Returns the element-wise max of two SparseTensors.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, in the canonical lexicographic ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to a_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// counterpart to a_indices for the other operand.
+ ///
+ ///
+ /// counterpart to a_values for the other operand; must be of the same dtype.
+ ///
+ ///
+ /// counterpart to a_shape for the other operand; the two shapes must be equal.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSparseMaximum'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : 2-D. The indices of the output SparseTensor.
+ /// output_values : 1-D. The values of the output SparseTensor.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+ ///
+ public static (Tensor output_indices, Tensor output_values) sparse_sparse_maximum (Tensor a_indices, Tensor a_values, Tensor a_shape, Tensor b_indices, Tensor b_values, Tensor b_shape, string name = "SparseSparseMaximum")
+ {
+ var dict = new Dictionary();
+ dict["a_indices"] = a_indices;
+ dict["a_values"] = a_values;
+ dict["a_shape"] = a_shape;
+ dict["b_indices"] = b_indices;
+ dict["b_values"] = b_values;
+ dict["b_shape"] = b_shape;
+ var op = _op_def_lib._apply_op_helper("SparseSparseMaximum", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ return (output_indices, output_values);
+ }
+
+ ///
+ /// Returns the element-wise min of two SparseTensors.
+ ///
+ ///
+ /// 2-D. N x R matrix with the indices of non-empty values in a
+ /// SparseTensor, in the canonical lexicographic ordering.
+ ///
+ ///
+ /// 1-D. N non-empty values corresponding to a_indices.
+ ///
+ ///
+ /// 1-D. Shape of the input SparseTensor.
+ ///
+ ///
+ /// counterpart to a_indices for the other operand.
+ ///
+ ///
+ /// counterpart to a_values for the other operand; must be of the same dtype.
+ ///
+ ///
+ /// counterpart to a_shape for the other operand; the two shapes must be equal.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSparseMinimum'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : 2-D. The indices of the output SparseTensor.
+ /// output_values : 1-D. The values of the output SparseTensor.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+ ///
+ public static (Tensor output_indices, Tensor output_values) sparse_sparse_minimum (Tensor a_indices, Tensor a_values, Tensor a_shape, Tensor b_indices, Tensor b_values, Tensor b_shape, string name = "SparseSparseMinimum")
+ {
+ var dict = new Dictionary();
+ dict["a_indices"] = a_indices;
+ dict["a_values"] = a_values;
+ dict["a_shape"] = a_shape;
+ dict["b_indices"] = b_indices;
+ dict["b_values"] = b_values;
+ dict["b_shape"] = b_shape;
+ var op = _op_def_lib._apply_op_helper("SparseSparseMinimum", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = op.outputs[_idx++];
+ var output_values = op.outputs[_idx++];
+ return (output_indices, output_values);
+ }
+
+ ///
+ /// Split a SparseTensor into num_split tensors along one dimension.
+ ///
+ ///
+ /// 0-D. The dimension along which to split. Must be in the range
+ /// [0, rank(shape)).
+ ///
+ ///
+ /// 2-D tensor representing the indices of the sparse tensor.
+ ///
+ ///
+ /// 1-D tensor representing the values of the sparse tensor.
+ ///
+ ///
+ /// 1-D. tensor representing the shape of the sparse tensor.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSplit'.
+ ///
+ ///
+ /// Optional argument
+ /// The number of ways to split.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_indices : A list of 1-D tensors representing the indices of the output
+ /// sparse tensors.
+ /// output_values : A list of 1-D tensors representing the values of the output sparse
+ /// tensors.
+ /// output_shape : A list of 1-D tensors representing the shape of the output sparse
+ /// tensors.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// If shape[split_dim] is not an integer multiple of num_split, slices
+ /// [0 : shape[split_dim] % num_split] each get one extra dimension.
+ /// For example, if split_dim = 1 and num_split = 2 and the input is
+ ///
+ /// input_tensor = shape = [2, 7]
+ /// [ a d e ]
+ /// [b c ]
+ ///
+ /// Graphically the output tensors are:
+ ///
+ /// output_tensor[0] = shape = [2, 4]
+ /// [ a ]
+ /// [b c ]
+ ///
+ /// output_tensor[1] = shape = [2, 3]
+ /// [ d e ]
+ /// [ ]
+ ///
+ public static (Tensor[] output_indices, Tensor[] output_values, Tensor[] output_shape) sparse_split (Tensor split_dim, Tensor indices, Tensor values, Tensor shape, int num_split, string name = "SparseSplit")
+ {
+ var dict = new Dictionary();
+ dict["split_dim"] = split_dim;
+ dict["indices"] = indices;
+ dict["values"] = values;
+ dict["shape"] = shape;
+ dict["num_split"] = num_split;
+ var op = _op_def_lib._apply_op_helper("SparseSplit", name: name, keywords: dict);
+ int _idx = 0;
+ var output_indices = Enumerable.Range(0, op.OutputListLength("output_indices")).Select(_ => op.outputs[_idx++]).ToArray();
+ var output_values = Enumerable.Range(0, op.OutputListLength("output_values")).Select(_ => op.outputs[_idx++]).ToArray();
+ var output_shape = Enumerable.Range(0, op.OutputListLength("output_shape")).Select(_ => op.outputs[_idx++]).ToArray();
+ return (output_indices, output_values, output_shape);
+ }
+
+ ///
+ /// Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.
+ ///
+ ///
+ /// 2-D.
The indices of the SparseTensor, with shape [nnz, ndims]. + /// + /// + /// 1-D. The values of the SparseTensor, with shape [nnz]. + /// + /// + /// 1-D. The shape of the SparseTensor, with shape [ndims]. + /// + /// + /// ndims-D Tensor. With shape a_shape. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorDenseAdd'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This Op does not require a_indices be sorted in standard lexicographic order. + /// + public static Tensor sparse_tensor_dense_add (Tensor a_indices, Tensor a_values, Tensor a_shape, Tensor b, string name = "SparseTensorDenseAdd") + { + var dict = new Dictionary(); + dict["a_indices"] = a_indices; + dict["a_values"] = a_values; + dict["a_shape"] = a_shape; + dict["b"] = b; + var op = _op_def_lib._apply_op_helper("SparseTensorDenseAdd", name: name, keywords: dict); + return op.output; + } + + /// + /// Multiply SparseTensor (of rank 2) "A" by dense matrix "B". + /// + /// + /// 2-D. The indices of the SparseTensor, size [nnz, 2] Matrix. + /// + /// + /// 1-D. The values of the SparseTensor, size [nnz] Vector. + /// + /// + /// 1-D. The shape of the SparseTensor, size [2] Vector. + /// + /// + /// 2-D. A dense Matrix. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorDenseMatMul'. + /// + /// + /// Use the adjoint of A in the matrix multiply. If A is complex, this + /// is transpose(conj(A)). Otherwise it's transpose(A). + /// + /// + /// Use the adjoint of B in the matrix multiply. If B is complex, this + /// is transpose(conj(B)). Otherwise it's transpose(B). + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// No validity checking is performed on the indices of A. However, the following + /// input format is recommended for optimal behavior: + /// + /// if adjoint_a == false: + /// A should be sorted in lexicographically increasing order. Use SparseReorder + /// if you're not sure. + /// if adjoint_a == true: + /// A should be sorted in order of increasing dimension 1 (i.e., "column major" + /// order instead of "row major" order). + /// + public static Tensor sparse_tensor_dense_mat_mul (Tensor a_indices, Tensor a_values, Tensor a_shape, Tensor b, bool? adjoint_a = null, bool? adjoint_b = null, string name = "SparseTensorDenseMatMul") + { + var dict = new Dictionary(); + dict["a_indices"] = a_indices; + dict["a_values"] = a_values; + dict["a_shape"] = a_shape; + dict["b"] = b; + if (adjoint_a.HasValue) + dict["adjoint_a"] = adjoint_a.Value; + if (adjoint_b.HasValue) + dict["adjoint_b"] = adjoint_b.Value; + var op = _op_def_lib._apply_op_helper("SparseTensorDenseMatMul", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that splits a SparseTensor into elements row-wise. + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorSliceDataset'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
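+ ///
+ /// A minimal call sketch (illustrative only; building the three component
+ /// tensors with a tf.constant-style helper is assumed, and indices, values
+ /// and dense_shape are taken to be prepared elsewhere):
+ ///
+ /// // A SparseTensor with dense shape [2, 3]; the resulting dataset yields
+ /// // one sparse row per slice along dimension 0.
+ /// var dataset = gen_ops.sparse_tensor_slice_dataset(indices, values, dense_shape);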
+ /// + public static Tensor sparse_tensor_slice_dataset (Tensor indices, Tensor values, Tensor dense_shape, string name = "SparseTensorSliceDataset") + { + var dict = new Dictionary(); + dict["indices"] = indices; + dict["values"] = values; + dict["dense_shape"] = dense_shape; + var op = _op_def_lib._apply_op_helper("SparseTensorSliceDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts a sparse representation into a dense tensor. + /// + /// + /// 0-D, 1-D, or 2-D. sparse_indices[i] contains the complete + /// index where sparse_values[i] will be placed. + /// + /// + /// 1-D. Shape of the dense output tensor. + /// + /// + /// 1-D. Values corresponding to each row of sparse_indices, + /// or a scalar value to be used for all sparse indices. + /// + /// + /// Scalar value to set for indices not specified in + /// sparse_indices. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseToDense'. + /// + /// + /// If true, indices are checked to make sure they are sorted in + /// lexicographic order and that there are no repeats. + /// + /// + /// Dense output tensor of shape output_shape. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Builds an array dense with shape output_shape such that + /// + /// + /// # If sparse_indices is scalar + /// dense[i] = (i == sparse_indices ? sparse_values : default_value) + /// + /// # If sparse_indices is a vector, then for each i + /// dense[sparse_indices[i]] = sparse_values[i] + /// + /// # If sparse_indices is an n by d matrix, then for each i in [0, n) + /// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] + /// + /// + /// All other values in dense are set to default_value. If sparse_values is a + /// scalar, all sparse indices are set to this single value. + /// + /// Indices should be sorted in lexicographic order, and indices must not + /// contain any repeats. If validate_indices is true, these properties + /// are checked during execution. + /// + public static Tensor sparse_to_dense (Tensor sparse_indices, Tensor output_shape, Tensor sparse_values, Tensor default_value, bool? validate_indices = null, string name = "SparseToDense") + { + var dict = new Dictionary(); + dict["sparse_indices"] = sparse_indices; + dict["output_shape"] = output_shape; + dict["sparse_values"] = sparse_values; + dict["default_value"] = default_value; + if (validate_indices.HasValue) + dict["validate_indices"] = validate_indices.Value; + var op = _op_def_lib._apply_op_helper("SparseToDense", name: name, keywords: dict); + return op.output; + } + + /// + /// Applies set operation along last dimension of 2 SparseTensor inputs. + /// + /// + /// 2D Tensor, indices of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, values of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, shape of a SparseTensor. set1_shape[0...n-1] must + /// be the same as set2_shape[0...n-1], set1_shape[n] is the + /// max set size across 0...n-1 dimensions. + /// + /// + /// 2D Tensor, indices of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, values of a SparseTensor. Must be in row-major + /// order. + /// + /// + /// 1D Tensor, shape of a SparseTensor. set2_shape[0...n-1] must + /// be the same as set1_shape[0...n-1], set2_shape[n] is the + /// max set size across 0...n-1 dimensions. 
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseToSparseSetOperation'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// result_indices : 2D indices of a SparseTensor.
+ /// result_values : 1D values of a SparseTensor.
+ /// result_shape : 1D Tensor shape of a SparseTensor. result_shape[0...n-1] is
+ /// the same as the 1st n-1 dimensions of set1 and set2, result_shape[n]
+ /// is the max result set size across all 0...n-1 dimensions.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// See SetOperationOp::SetOperationFromContext for values of set_operation.
+ ///
+ /// If validate_indices is True, SparseToSparseSetOperation validates the
+ /// order and range of set1 and set2 indices.
+ ///
+ /// Input set1 is a SparseTensor represented by set1_indices, set1_values,
+ /// and set1_shape. For set1 ranked n, 1st n-1 dimensions must be the same
+ /// as set2. Dimension n contains values in a set; duplicates are allowed but
+ /// ignored.
+ ///
+ /// Input set2 is a SparseTensor represented by set2_indices, set2_values,
+ /// and set2_shape. For set2 ranked n, 1st n-1 dimensions must be the same
+ /// as set1. Dimension n contains values in a set; duplicates are allowed but
+ /// ignored.
+ ///
+ /// Output result is a SparseTensor represented by result_indices,
+ /// result_values, and result_shape. For set1 and set2 ranked n, this
+ /// has rank n and the same 1st n-1 dimensions as set1 and set2. The nth
+ /// dimension contains the result of set_operation applied to the corresponding
+ /// [0...n-1] dimension of set.
+ ///
+ public static (Tensor result_indices, Tensor result_values, Tensor result_shape) sparse_to_sparse_set_operation (Tensor set1_indices, Tensor set1_values, Tensor set1_shape, Tensor set2_indices, Tensor set2_values, Tensor set2_shape, string set_operation, bool? validate_indices = null, string name = "SparseToSparseSetOperation")
+ {
+ var dict = new Dictionary();
+ dict["set1_indices"] = set1_indices;
+ dict["set1_values"] = set1_values;
+ dict["set1_shape"] = set1_shape;
+ dict["set2_indices"] = set2_indices;
+ dict["set2_values"] = set2_values;
+ dict["set2_shape"] = set2_shape;
+ dict["set_operation"] = set_operation;
+ if (validate_indices.HasValue)
+ dict["validate_indices"] = validate_indices.Value;
+ var op = _op_def_lib._apply_op_helper("SparseToSparseSetOperation", name: name, keywords: dict);
+ int _idx = 0;
+ var result_indices = op.outputs[_idx++];
+ var result_values = op.outputs[_idx++];
+ var result_shape = op.outputs[_idx++];
+ return (result_indices, result_values, result_shape);
+ }
+
+ ///
+ /// Splits a tensor into num_split tensors along one dimension.
+ ///
+ ///
+ /// 0-D. The dimension along which to split. Must be in the range
+ /// [-rank(value), rank(value)).
+ ///
+ ///
+ /// The tensor to split.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Split'.
+ ///
+ ///
+ /// Optional argument
+ /// The number of ways to split. Must evenly divide
+ /// value.shape[split_dim].
+ /// + /// + /// They are identically shaped tensors, whose shape matches that of value + /// except along axis, where their sizes are + /// values.shape[split_dim] / num_split. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] split (Tensor split_dim, Tensor value, int num_split, string name = "Split") + { + var dict = new Dictionary(); + dict["split_dim"] = split_dim; + dict["value"] = value; + dict["num_split"] = num_split; + var op = _op_def_lib._apply_op_helper("Split", name: name, keywords: dict); + int _idx = 0; + var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray(); + return (output); + } + + /// + /// Splits a tensor into num_split tensors along one dimension. + /// + /// + /// The tensor to split. + /// + /// + /// list containing the sizes of each output tensor along the split + /// dimension. Must sum to the dimension of value along split_dim. + /// Can contain one -1 indicating that dimension is to be inferred. + /// + /// + /// 0-D. The dimension along which to split. Must be in the range + /// [-rank(value), rank(value)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SplitV'. + /// + /// + /// Optional argument + /// + /// + /// Tensors whose shape matches that of value + /// except along axis, where their sizes are + /// size_splits[i]. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] split_v (Tensor value, Tensor size_splits, Tensor split_dim, int num_split, string name = "SplitV") + { + var dict = new Dictionary(); + dict["value"] = value; + dict["size_splits"] = size_splits; + dict["split_dim"] = split_dim; + dict["num_split"] = num_split; + var op = _op_def_lib._apply_op_helper("SplitV", name: name, keywords: dict); + int _idx = 0; + var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray(); + return (output); + } + + /// + /// Creates a dataset that executes a SQL query and emits rows of the result set. + /// + /// + /// The database type. Currently, the only supported type is 'sqlite'. + /// + /// + /// A connection string to connect to the database. + /// + /// + /// A SQL query to execute. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SqlDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor sql_dataset (Tensor driver_name, Tensor data_source_name, Tensor query, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SqlDataset") + { + var dict = new Dictionary(); + dict["driver_name"] = driver_name; + dict["data_source_name"] = data_source_name; + dict["query"] = query; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("SqlDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes square root of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sqrt'. 
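+ ///
+ /// For instance (sketch; x is any hypothetical numeric tensor):
+ /// var y = gen_ops.sqrt(x);
+ ///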
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = \sqrt{x} = x^{1/2}\\). + /// + public static Tensor sqrt (Tensor x, string name = "Sqrt") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Sqrt", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient for the sqrt of x wrt its input. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SqrtGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = dy * 0.5 / y, where y = sqrt(x), and dy + /// is the corresponding input gradient. + /// + public static Tensor sqrt_grad (Tensor y, Tensor dy, string name = "SqrtGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("SqrtGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes square of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Square'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// I.e., \\(y = x * x = x^2\\). + /// + public static Tensor square (Tensor x, string name = "Square") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Square", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns (x - y)(x - y) element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SquaredDifference'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: SquaredDifference supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor squared_difference (Tensor x, Tensor y, string name = "SquaredDifference") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("SquaredDifference", name: name, keywords: dict); + return op.output; + } + + /// + /// Removes dimensions of size 1 from the shape of a tensor. + /// + /// + /// The input to squeeze. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Squeeze'. + /// + /// + /// If specified, only squeezes the dimensions listed. The dimension + /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must + /// be in the range [-rank(input), rank(input)). + /// + /// + /// Contains the same data as input, but has one or more dimensions of + /// size 1 removed. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Given a tensor input, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed. If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// axis. 
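+ ///
+ /// In this binding the axis list is passed through the squeeze_dims argument; a
+ /// hypothetical call removing dimensions 2 and 4 would be:
+ ///
+ /// var squeezed = gen_ops.squeeze(t, squeeze_dims: new[] { 2, 4 });
+ ///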
+ /// + /// For example: + /// + /// + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t)) ==> [2, 3] + /// + /// + /// Or, to remove specific size 1 dimensions: + /// + /// + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + /// + /// + public static Tensor squeeze (Tensor input, int[] squeeze_dims = null, string name = "Squeeze") + { + var dict = new Dictionary(); + dict["input"] = input; + if (squeeze_dims != null) + dict["squeeze_dims"] = squeeze_dims; + var op = _op_def_lib._apply_op_helper("Squeeze", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated, use StackV2. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Stack'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stack (TF_DataType elem_type, string stack_name = null, string name = "Stack") + { + var dict = new Dictionary(); + dict["elem_type"] = elem_type; + if (stack_name != null) + dict["stack_name"] = stack_name; + var op = _op_def_lib._apply_op_helper("Stack", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated, use StackCloseV2. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackClose'. + /// + /// + /// Returns the description of the operation + /// + public static Operation stack_close (Tensor handle, string name = "StackClose") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("StackClose", name: name, keywords: dict); + return op; + } + + /// + /// Delete the stack from its resource container. + /// + /// + /// The handle to a stack. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackCloseV2'. + /// + /// + /// Returns the description of the operation + /// + public static Operation stack_close_v2 (Tensor handle, string name = "StackCloseV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("StackCloseV2", name: name, keywords: dict); + return op; + } + + /// + /// Deprecated, use StackPopV2. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPop'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stack_pop (Tensor handle, TF_DataType elem_type, string name = "StackPop") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["elem_type"] = elem_type; + var op = _op_def_lib._apply_op_helper("StackPop", name: name, keywords: dict); + return op.output; + } + + /// + /// Pop the element at the top of the stack. + /// + /// + /// The handle to a stack. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPopV2'. + /// + /// + /// Optional argument + /// The type of the elem that is popped. + /// + /// + /// The tensor that is popped from the top of the stack. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
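+ ///
+ /// A sketch of the full push/pop cycle (max_size and elem are hypothetical tensors;
+ /// StackV2 creates the resource handle consumed by the other stack ops):
+ ///
+ /// var stack = gen_ops.stack_v2(max_size, TF_DataType.TF_FLOAT);
+ /// var pushed = gen_ops.stack_push_v2(stack, elem);
+ /// var popped = gen_ops.stack_pop_v2(stack, TF_DataType.TF_FLOAT);
+ ///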
+ /// + public static Tensor stack_pop_v2 (Tensor handle, TF_DataType elem_type, string name = "StackPopV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["elem_type"] = elem_type; + var op = _op_def_lib._apply_op_helper("StackPopV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated, use StackPushV2. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPush'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stack_push (Tensor handle, Tensor elem, bool? swap_memory = null, string name = "StackPush") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["elem"] = elem; + if (swap_memory.HasValue) + dict["swap_memory"] = swap_memory.Value; + var op = _op_def_lib._apply_op_helper("StackPush", name: name, keywords: dict); + return op.output; + } + + /// + /// Push an element onto the stack. + /// + /// + /// The handle to a stack. + /// + /// + /// The tensor to be pushed onto the stack. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPushV2'. + /// + /// + /// Swap elem to CPU. Default to false. + /// + /// + /// The same tensor as the input 'elem'. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stack_push_v2 (Tensor handle, Tensor elem, bool? swap_memory = null, string name = "StackPushV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["elem"] = elem; + if (swap_memory.HasValue) + dict["swap_memory"] = swap_memory.Value; + var op = _op_def_lib._apply_op_helper("StackPushV2", name: name, keywords: dict); + return op.output; + } + + /// + /// A stack that produces elements in first-in last-out order. + /// + /// + /// The maximum size of the stack if non-negative. If negative, the stack + /// size is unlimited. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackV2'. + /// + /// + /// Optional argument + /// The type of the elements on the stack. + /// + /// + /// Overrides the name used for the temporary stack resource. Default + /// value is the name of the 'Stack' op (which is guaranteed unique). + /// + /// + /// The handle to the stack. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stack_v2 (Tensor max_size, TF_DataType elem_type, string stack_name = null, string name = "StackV2") + { + var dict = new Dictionary(); + dict["max_size"] = max_size; + dict["elem_type"] = elem_type; + if (stack_name != null) + dict["stack_name"] = stack_name; + var op = _op_def_lib._apply_op_helper("StackV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Stage values similar to a lightweight Enqueue. + /// + /// + /// a list of tensors + /// dtypes A list of data types that inserted values should adhere to. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Stage'. + /// + /// + /// Maximum number of elements in the Staging Area. If > 0, inserts + /// on the container will block when the capacity is reached. + /// + /// + /// The maximum number of bytes allowed for Tensors in the Staging Area. 
+ /// If > 0, inserts will block until sufficient space is available. + /// + /// + /// If non-empty, this queue is placed in the given container. Otherwise, + /// a default container is used. + /// + /// + /// It is necessary to match this name to the matching Unstage Op. + /// + /// + /// Returns the description of the operation + /// + /// + /// The basic functionality of this Op is similar to a queue with many + /// fewer capabilities and options. This Op is optimized for performance. + /// + public static Operation stage (Tensor[] values, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "Stage") + { + var dict = new Dictionary(); + dict["values"] = values; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("Stage", name: name, keywords: dict); + return op; + } + + /// + /// Op removes all elements in the underlying container. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StageClear'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Returns the description of the operation + /// + public static Operation stage_clear (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "StageClear") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("StageClear", name: name, keywords: dict); + return op; + } + + /// + /// Op peeks at the values at the specified index. If the + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StagePeek'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// underlying container does not contain sufficient elements + /// this op will block until it does. This Op is optimized for + /// performance. + /// + public static Tensor[] stage_peek (Tensor index, TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "StagePeek") + { + var dict = new Dictionary(); + dict["index"] = index; + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("StagePeek", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Op returns the number of elements in the underlying container. 
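+ ///
+ /// For example (sketch): a size probe for a staging area created elsewhere with
+ /// matching dtypes and a hypothetical shared_name might be
+ /// var size = gen_ops.stage_size(new[] { TF_DataType.TF_FLOAT }, shared_name: "area0");
+ ///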
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StageSize'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stage_size (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "StageSize") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("StageSize", name: name, keywords: dict); + return op.output; + } + + /// + /// Draws samples from a multinomial distribution. + /// + /// + /// 2-D Tensor with shape [batch_size, num_classes]. Each slice [i, :] + /// represents the unnormalized log probabilities for all classes. + /// + /// + /// 0-D. Number of independent samples to draw for each row slice. + /// + /// + /// 2 seeds (shape [2]). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessMultinomial'. + /// + /// + /// + /// + /// 2-D Tensor with shape [batch_size, num_samples]. Each slice [i, :] + /// contains the drawn class labels with range [0, num_classes). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor stateless_multinomial (Tensor logits, Tensor num_samples, Tensor seed, TF_DataType? output_dtype = null, string name = "StatelessMultinomial") + { + var dict = new Dictionary(); + dict["logits"] = logits; + dict["num_samples"] = num_samples; + dict["seed"] = seed; + if (output_dtype.HasValue) + dict["output_dtype"] = output_dtype.Value; + var op = _op_def_lib._apply_op_helper("StatelessMultinomial", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs deterministic pseudorandom values from a normal distribution. + /// + /// + /// The shape of the output tensor. + /// + /// + /// 2 seeds (shape [2]). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessRandomNormal'. + /// + /// + /// The type of the output. + /// + /// + /// Random values with specified shape. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + /// The outputs are a deterministic function of shape and seed. + /// + public static Tensor stateless_random_normal (Tensor shape, Tensor seed, TF_DataType? dtype = null, string name = "StatelessRandomNormal") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["seed"] = seed; + if (dtype.HasValue) + dict["dtype"] = dtype.Value; + var op = _op_def_lib._apply_op_helper("StatelessRandomNormal", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs deterministic pseudorandom random values from a uniform distribution. + /// + /// + /// The shape of the output tensor. + /// + /// + /// 2 seeds (shape [2]). 
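+ ///
+ /// Illustrative sketch (shape and seed are hypothetical int tensors; the same
+ /// shape/seed pair always yields the same values):
+ ///
+ /// var u = gen_ops.stateless_random_uniform(shape, seed, dtype: TF_DataType.TF_FLOAT);
+ ///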
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessRandomUniform'.
+ ///
+ ///
+ /// The type of the output.
+ ///
+ ///
+ /// Random values with specified shape.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The generated values follow a uniform distribution in the range [0, 1). The
+ /// lower bound 0 is included in the range, while the upper bound 1 is excluded.
+ ///
+ /// The outputs are a deterministic function of shape and seed.
+ ///
+ public static Tensor stateless_random_uniform (Tensor shape, Tensor seed, TF_DataType? dtype = null, string name = "StatelessRandomUniform")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["shape"] = shape;
+ dict["seed"] = seed;
+ if (dtype.HasValue)
+ dict["dtype"] = dtype.Value;
+ var op = _op_def_lib._apply_op_helper("StatelessRandomUniform", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Outputs deterministic pseudorandom values from a truncated normal distribution.
+ ///
+ ///
+ /// The shape of the output tensor.
+ ///
+ ///
+ /// 2 seeds (shape [2]).
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessTruncatedNormal'.
+ ///
+ ///
+ /// The type of the output.
+ ///
+ ///
+ /// Random values with specified shape.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The generated values follow a normal distribution with mean 0 and standard
+ /// deviation 1, except that values whose magnitude is more than 2 standard
+ /// deviations from the mean are dropped and re-picked.
+ ///
+ /// The outputs are a deterministic function of shape and seed.
+ ///
+ public static Tensor stateless_truncated_normal (Tensor shape, Tensor seed, TF_DataType? dtype = null, string name = "StatelessTruncatedNormal")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["shape"] = shape;
+ dict["seed"] = seed;
+ if (dtype.HasValue)
+ dict["dtype"] = dtype.Value;
+ var op = _op_def_lib._apply_op_helper("StatelessTruncatedNormal", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Replaces the match of pattern in input with rewrite.
+ ///
+ ///
+ /// The text to be processed.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StaticRegexReplace'.
+ ///
+ ///
+ /// Optional argument
+ /// The regular expression to match the input.
+ ///
+ ///
+ /// Optional argument
+ /// The rewrite to be applied to the matched expression.
+ ///
+ ///
+ /// If True, the replacement is global, otherwise the replacement
+ /// is done only on the first match.
+ ///
+ ///
+ /// The text after applying pattern and rewrite.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
+ ///
+ public static Tensor static_regex_replace (Tensor input, string pattern, string rewrite, bool? replace_global = null, string name = "StaticRegexReplace")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["pattern"] = pattern;
+ dict["rewrite"] = rewrite;
+ if (replace_global.HasValue)
+ dict["replace_global"] = replace_global.Value;
+ var op = _op_def_lib._apply_op_helper("StaticRegexReplace", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates a statistics manager resource.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatsAggregatorHandle'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor stats_aggregator_handle (string container = null, string shared_name = null, string name = "StatsAggregatorHandle")
+ {
+ var dict = new Dictionary<string, object>();
+ if (container != null)
+ dict["container"] = container;
+ if (shared_name != null)
+ dict["shared_name"] = shared_name;
+ var op = _op_def_lib._apply_op_helper("StatsAggregatorHandle", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Produces a summary of any statistics recorded by the given statistics manager.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatsAggregatorSummary'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor stats_aggregator_summary (Tensor iterator, string name = "StatsAggregatorSummary")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["iterator"] = iterator;
+ var op = _op_def_lib._apply_op_helper("StatsAggregatorSummary", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Stops gradient computation.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StopGradient'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// When executed in a graph, this op outputs its input tensor as-is.
+ ///
+ /// When building ops to compute gradients, this op prevents the contribution of
+ /// its inputs from being taken into account. Normally, the gradient generator adds ops
+ /// to a graph to compute the derivatives of a specified 'loss' by recursively
+ /// finding out inputs that contributed to its computation. If you insert this op
+ /// in the graph its inputs are masked from the gradient generator. They are not
+ /// taken into account for computing gradients.
+ ///
+ /// This is useful any time you want to compute a value with TensorFlow but need
+ /// to pretend that the value was a constant. Some examples include:
+ ///
+ /// * The *EM* algorithm where the *M-step* should not involve backpropagation
+ /// through the output of the *E-step*.
+ /// * Contrastive divergence training of Boltzmann machines where, when
+ /// differentiating the energy function, the training must not backpropagate
+ /// through the graph that generated the samples from the model.
+ /// * Adversarial training, where no backprop should happen through the adversarial
+ /// example generation process.
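+ ///
+ /// A minimal sketch of the "treat as constant" use (embeddings is a hypothetical
+ /// tensor; gradients will not flow back through frozen):
+ ///
+ /// var frozen = gen_ops.stop_gradient(embeddings);
+ ///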
+ ///
+ public static Tensor stop_gradient (Tensor input, string name = "StopGradient")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ var op = _op_def_lib._apply_op_helper("StopGradient", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Return a strided slice from input.
+ ///
+ ///
+ ///
+ ///
+ /// begin[k] specifies the offset into the kth range specification.
+ /// The exact dimension this corresponds to will be determined by context.
+ /// Out-of-bounds values will be silently clamped. If the kth bit of
+ /// begin_mask is set then begin[k] is ignored and the full range of the
+ /// appropriate dimension is used instead. Negative values cause indexing
+ /// to start from the highest element e.g. If foo==[1,2,3] then foo[-1]==3.
+ ///
+ ///
+ /// end[i] is like begin with the exception that end_mask is
+ /// used to determine full ranges.
+ ///
+ ///
+ /// strides[i] specifies the increment in the ith specification
+ /// after extracting a given element. Negative indices will reverse
+ /// the original order. Out of range values are
+ /// clamped to [0,dim[i]) if slice[i]>0 or [-1,dim[i]-1] if slice[i] < 0
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSlice'.
+ ///
+ ///
+ /// a bitmask where a bit i being 1 means to ignore the begin
+ /// value and instead use the largest interval possible. At runtime
+ /// begin[i] will be replaced with [0, n-1) if stride[i] > 0 or
+ /// [-1, n-1] if stride[i] < 0
+ ///
+ ///
+ /// analogous to begin_mask
+ ///
+ ///
+ /// a bitmask where bit i being 1 means the ith
+ /// position is actually an ellipsis. One bit at most can be 1.
+ /// If ellipsis_mask == 0, then an implicit ellipsis mask of 1 << (m+1)
+ /// is provided. This means that foo[3:5] == foo[3:5, ...]. An ellipsis
+ /// implicitly creates as many range specifications as necessary to fully
+ /// specify the sliced range for every dimension. For example for a 4-dimensional
+ /// tensor foo the slice foo[2, ..., 5:8] implies foo[2, :, :, 5:8].
+ ///
+ ///
+ /// a bitmask where bit i being 1 means the ith
+ /// specification creates a new shape 1 dimension. For example
+ /// foo[:4, tf.newaxis, :2] would produce a shape (4, 1, 2) tensor.
+ ///
+ ///
+ /// a bitmask where bit i implies that the ith
+ /// specification should shrink the dimensionality. begin and end
+ /// must imply a slice of size 1 in the dimension. For example in
+ /// python one might do foo[:, 3, :] which would result in
+ /// shrink_axis_mask being 2.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Note, most python users will want to use the Python Tensor.__getitem__
+ /// or Variable.__getitem__ rather than this op directly.
+ ///
+ /// The goal of this op is to produce a new tensor with a subset of
+ /// the elements from the n dimensional input tensor. The subset is chosen using
+ /// a sequence of m sparse range specifications encoded into the arguments
+ /// of this function. Note, in some cases
+ /// m could be equal to n, but this need not be the case. Each
+ /// range specification entry can be one of the following:
+ ///
+ /// - An ellipsis (...). Ellipses are used to imply zero or more
+ /// dimensions of full-dimension selection and are produced using
+ /// ellipsis_mask. For example, foo[...] is the identity slice.
+ ///
+ /// - A new axis.
+ /// This is used to insert a new shape=1 dimension and is
+ /// produced using new_axis_mask. For example, foo[:, ...] where
+ /// foo is shape (3, 4) produces a (1, 3, 4) tensor.
+ ///
+ ///
+ /// - A range begin:end:stride. This is used to specify how much to choose from
+ /// a given dimension. stride can be any integer but 0. begin is an integer
+ /// which represents the index of the first value to select while end represents
+ /// the index of the last value to select. The number of values selected in each
+ /// dimension is end - begin if stride > 0 and begin - end if stride < 0.
+ /// begin and end can be negative where -1 is the last element, -2 is
+ /// the second to last. begin_mask controls whether to replace the explicitly
+ /// given begin with an implicit effective value of 0 if stride > 0 and
+ /// -1 if stride < 0. end_mask is analogous but produces the number
+ /// required to create the largest open interval. For example, given a shape
+ /// (3,) tensor foo[:], the effective begin and end are 0 and 3. Do
+ /// not assume this is equivalent to foo[0:-1] which has an effective begin
+ /// and end of 0 and 2. Another example is foo[-2::-1] which reverses the
+ /// first dimension of a tensor while dropping the last two (in the original
+ /// order elements). For example foo = [1,2,3,4]; foo[-2::-1] is [4,3].
+ ///
+ /// - A single index. This is used to keep only elements that have a given
+ /// index. For example foo[2, :] on a shape (5,6) tensor produces a
+ /// shape (6,) tensor. This is encoded in begin and end and
+ /// shrink_axis_mask.
+ ///
+ /// Each conceptual range specification is encoded in the op's argument. This
+ /// encoding is best understood by considering a non-trivial example. In
+ /// particular,
+ /// foo[1, 2:4, None, ..., :-3:-1, :] will be encoded as
+ ///
+ ///
+ /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
+ /// end = [2, 4, x, x, -3, x]
+ /// strides = [1, 1, x, x, -1, 1]
+ /// begin_mask = 1<<4 | 1 << 5 = 48
+ /// end_mask = 1<<5 = 32
+ /// ellipsis_mask = 1<<3 = 8
+ /// new_axis_mask = 1<<2 = 4
+ /// shrink_axis_mask = 1<<0 = 1
+ ///
+ ///
+ /// In this case if foo.shape is (5, 5, 5, 5, 5, 5) the final shape of
+ /// the slice becomes (2, 1, 5, 5, 2, 5).
+ /// Let us walk step by step through each argument specification.
+ ///
+ /// 1. The first argument in the example slice is turned into begin = 1 and
+ /// end = begin + 1 = 2. To disambiguate from the original spec 2:4 we
+ /// also set the appropriate bit in shrink_axis_mask.
+ ///
+ /// 2. 2:4 contributes 2, 4, 1 to begin, end, and stride. All masks have
+ /// zero bits contributed.
+ ///
+ /// 3. None is a synonym for tf.newaxis. This means insert a dimension of size 1
+ /// in the final shape. Dummy values are contributed to begin,
+ /// end and stride, while the new_axis_mask bit is set.
+ ///
+ /// 4. ... grab the full ranges from as many dimensions as needed to
+ /// fully specify a slice for every dimension of the input shape.
+ ///
+ /// 5. :-3:-1 shows the use of negative indices. A negative index i associated
+ /// with a dimension that has shape s is converted to a positive index
+ /// s + i. So -1 becomes s-1 (i.e. the last element). This conversion
+ /// is done internally so begin, end and strides receive x, -3, and -1.
+ /// The appropriate begin_mask bit is set to indicate the start range is the
+ /// full range (ignoring the x).
+ ///
+ /// 6. : indicates that the entire contents of the corresponding dimension
+ /// is selected.
+ /// This is equivalent to :: or 0::1. begin, end, and strides
+ /// receive 0, 0, and 1, respectively. The appropriate bits in begin_mask and
+ /// end_mask are also set.
+ ///
+ /// *Requirements*:
+ /// 0 != strides[i] for i in [0, m)
+ /// ellipsis_mask must be a power of two (only one ellipsis)
+ ///
+ public static Tensor strided_slice (Tensor input, Tensor begin, Tensor end, Tensor strides, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "StridedSlice")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["begin"] = begin;
+ dict["end"] = end;
+ dict["strides"] = strides;
+ if (begin_mask.HasValue)
+ dict["begin_mask"] = begin_mask.Value;
+ if (end_mask.HasValue)
+ dict["end_mask"] = end_mask.Value;
+ if (ellipsis_mask.HasValue)
+ dict["ellipsis_mask"] = ellipsis_mask.Value;
+ if (new_axis_mask.HasValue)
+ dict["new_axis_mask"] = new_axis_mask.Value;
+ if (shrink_axis_mask.HasValue)
+ dict["shrink_axis_mask"] = shrink_axis_mask.Value;
+ var op = _op_def_lib._apply_op_helper("StridedSlice", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Assign value to the sliced l-value reference of ref.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSliceAssign'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The values of value are assigned to the positions in the variable
+ /// ref that are selected by the slice parameters. The slice parameters
+ /// begin, end, strides, etc. work exactly as in StridedSlice.
+ ///
+ /// NOTE this op currently does not support broadcasting and so value's
+ /// shape must be exactly the shape produced by the slice of ref.
+ ///
+ public static Tensor strided_slice_assign (Tensor reference, Tensor begin, Tensor end, Tensor strides, Tensor value, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "StridedSliceAssign")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["ref"] = reference;
+ dict["begin"] = begin;
+ dict["end"] = end;
+ dict["strides"] = strides;
+ dict["value"] = value;
+ if (begin_mask.HasValue)
+ dict["begin_mask"] = begin_mask.Value;
+ if (end_mask.HasValue)
+ dict["end_mask"] = end_mask.Value;
+ if (ellipsis_mask.HasValue)
+ dict["ellipsis_mask"] = ellipsis_mask.Value;
+ if (new_axis_mask.HasValue)
+ dict["new_axis_mask"] = new_axis_mask.Value;
+ if (shrink_axis_mask.HasValue)
+ dict["shrink_axis_mask"] = shrink_axis_mask.Value;
+ var op = _op_def_lib._apply_op_helper("StridedSliceAssign", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns the gradient of StridedSlice.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSliceGrad'.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Since StridedSlice cuts out pieces of its input which is size
+ /// shape, its gradient will have the same shape (which is passed here
+ /// as shape).
+ /// The gradient will be zero in any element that the slice
+ /// does not select.
+ ///
+ /// Arguments are the same as StridedSlice with the exception that
+ /// dy is the input gradient to be propagated and shape is the
+ /// shape of StridedSlice's input.
+ ///
+ public static Tensor strided_slice_grad (Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "StridedSliceGrad")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["shape"] = shape;
+ dict["begin"] = begin;
+ dict["end"] = end;
+ dict["strides"] = strides;
+ dict["dy"] = dy;
+ if (begin_mask.HasValue)
+ dict["begin_mask"] = begin_mask.Value;
+ if (end_mask.HasValue)
+ dict["end_mask"] = end_mask.Value;
+ if (ellipsis_mask.HasValue)
+ dict["ellipsis_mask"] = ellipsis_mask.Value;
+ if (new_axis_mask.HasValue)
+ dict["new_axis_mask"] = new_axis_mask.Value;
+ if (shrink_axis_mask.HasValue)
+ dict["shrink_axis_mask"] = shrink_axis_mask.Value;
+ var op = _op_def_lib._apply_op_helper("StridedSliceGrad", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Joins the strings in the given list of string tensors into one tensor;
+ ///
+ ///
+ /// A list of string tensors. The tensors must all have the same shape,
+ /// or be scalars. Scalars may be mixed in; these will be broadcast to the shape
+ /// of non-scalar inputs.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringJoin'.
+ ///
+ ///
+ /// string, an optional join separator.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// with the given separator (default is an empty separator).
+ ///
+ public static Tensor string_join (Tensor[] inputs, string separator = null, string name = "StringJoin")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["inputs"] = inputs;
+ if (separator != null)
+ dict["separator"] = separator;
+ var op = _op_def_lib._apply_op_helper("StringJoin", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Split elements of input based on delimiter into a SparseTensor.
+ ///
+ ///
+ /// 1-D. Strings to split.
+ ///
+ ///
+ /// 0-D. Delimiter characters (bytes), or empty string.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringSplit'.
+ ///
+ ///
+ /// A bool. If True, skip the empty strings from the result.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// indices : A dense matrix of int64 representing the indices of the sparse tensor.
+ /// values : A vector of strings corresponding to the split values.
+ /// shape : a length-2 vector of int64 representing the shape of the sparse
+ /// tensor, where the first value is N and the second value is the maximum number
+ /// of tokens in a single input entry.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Let N be the size of source (typically N will be the batch size). Split each
+ /// element of input based on delimiter and return a SparseTensor
+ /// containing the split tokens. Empty tokens are ignored.
+ ///
+ /// delimiter can be empty, or a string of split characters.
+ /// If delimiter is an
+ /// empty string, each element of input is split into individual single-byte
+ /// character strings, including splitting of UTF-8 multibyte sequences. Otherwise
+ /// every character of delimiter is a potential split point.
+ ///
+ /// For example:
+ /// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
+ /// will be
+ ///
+ /// indices = [0, 0;
+ /// 0, 1;
+ /// 1, 0;
+ /// 1, 1;
+ /// 1, 2]
+ /// shape = [2, 3]
+ /// values = ['hello', 'world', 'a', 'b', 'c']
+ ///
+ public static (Tensor indices, Tensor values, Tensor shape) string_split (Tensor input, Tensor delimiter, bool? skip_empty = null, string name = "StringSplit")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["delimiter"] = delimiter;
+ if (skip_empty.HasValue)
+ dict["skip_empty"] = skip_empty.Value;
+ var op = _op_def_lib._apply_op_helper("StringSplit", name: name, keywords: dict);
+ int _idx = 0;
+ var indices = op.outputs[_idx++];
+ var values = op.outputs[_idx++];
+ var shape = op.outputs[_idx++];
+ return (indices, values, shape);
+ }
+
+ ///
+ /// Split elements of source based on sep into a SparseTensor.
+ ///
+ ///
+ /// 1-D string Tensor, the strings to split.
+ ///
+ ///
+ /// 0-D string Tensor, the delimiter character.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringSplitV2'.
+ ///
+ ///
+ /// An int. If maxsplit > 0, limit of the split of the result.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// indices :
+ /// values :
+ /// shape :
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Let N be the size of source (typically N will be the batch size). Split each
+ /// element of source based on sep and return a SparseTensor
+ /// containing the split tokens. Empty tokens are ignored.
+ ///
+ /// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
+ /// then the output will be
+ ///
+ /// st.indices = [0, 0;
+ /// 0, 1;
+ /// 1, 0;
+ /// 1, 1;
+ /// 1, 2]
+ /// st.shape = [2, 3]
+ /// st.values = ['hello', 'world', 'a', 'b', 'c']
+ ///
+ ///
+ /// If sep is given, consecutive delimiters are not grouped together and are
+ /// deemed to delimit empty strings. For example, source of "1<>2<><>3" and
+ /// sep of "<>" returns ["1", "2", "", "3"]. If sep is None or an empty
+ /// string, consecutive whitespace characters are regarded as a single separator, and the
+ /// result will contain no empty strings at the start or end if the string has
+ /// leading or trailing whitespace.
+ ///
+ /// Note that the above mentioned behavior matches python's str.split.
+ ///
+ public static (Tensor indices, Tensor values, Tensor shape) string_split_v2 (Tensor input, Tensor sep, int? maxsplit = null, string name = "StringSplitV2")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["sep"] = sep;
+ if (maxsplit.HasValue)
+ dict["maxsplit"] = maxsplit.Value;
+ var op = _op_def_lib._apply_op_helper("StringSplitV2", name: name, keywords: dict);
+ int _idx = 0;
+ var indices = op.outputs[_idx++];
+ var values = op.outputs[_idx++];
+ var shape = op.outputs[_idx++];
+ return (indices, values, shape);
+ }
+
+ ///
+ /// Strip leading and trailing whitespaces from the Tensor.
+ ///
+ ///
+ /// A string Tensor of any shape.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringStrip'.
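+ ///
+ /// Sketch (lines is a hypothetical string tensor):
+ /// var trimmed = gen_ops.string_strip(lines);
+ ///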
+ /// + /// + /// A string Tensor of the same shape as the input. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor string_strip (Tensor input, string name = "StringStrip") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("StringStrip", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucket'. + /// + /// + /// Optional argument + /// The number of buckets. + /// + /// + /// A Tensor of the same shape as the input string_tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The hash function is deterministic on the content of the string within the + /// process. + /// + /// Note that the hash function may change from time to time. + /// This functionality will be deprecated and it's recommended to use + /// tf.string_to_hash_bucket_fast() or tf.string_to_hash_bucket_strong(). + /// + public static Tensor string_to_hash_bucket (Tensor string_tensor, int num_buckets, string name = "StringToHashBucket") + { + var dict = new Dictionary(); + dict["string_tensor"] = string_tensor; + dict["num_buckets"] = num_buckets; + var op = _op_def_lib._apply_op_helper("StringToHashBucket", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// + /// The strings to assign a hash bucket. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucketFast'. + /// + /// + /// Optional argument + /// The number of buckets. + /// + /// + /// A Tensor of the same shape as the input string_tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The hash function is deterministic on the content of the string within the + /// process and will never change. However, it is not suitable for cryptography. + /// This function may be used when CPU time is scarce and inputs are trusted or + /// unimportant. There is a risk of adversaries constructing inputs that all hash + /// to the same bucket. To prevent this problem, use a strong hash function with + /// tf.string_to_hash_bucket_strong. + /// + public static Tensor string_to_hash_bucket_fast (Tensor input, int num_buckets, string name = "StringToHashBucketFast") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["num_buckets"] = num_buckets; + var op = _op_def_lib._apply_op_helper("StringToHashBucketFast", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// + /// The strings to assign a hash bucket. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucketStrong'. + /// + /// + /// Optional argument + /// The number of buckets. + /// + /// + /// Optional argument + /// The key for the keyed hash function passed as a list of two uint64 + /// elements. + /// + /// + /// A Tensor of the same shape as the input string_tensor. 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The hash function is deterministic on the content of the string within the + /// process. The hash function is a keyed hash function, where attribute key + /// defines the key of the hash function. key is an array of 2 elements. + /// + /// A strong hash is important when inputs may be malicious, e.g. URLs with + /// additional components. Adversaries could try to make their inputs hash to the + /// same bucket for a denial-of-service attack or to skew the results. A strong + /// hash prevents this by making it difficult, if not infeasible, to compute inputs + /// that hash to the same bucket. This comes at a cost of roughly 4x higher compute + /// time than tf.string_to_hash_bucket_fast. + /// + public static Tensor string_to_hash_bucket_strong (Tensor input, int num_buckets, int[] key, string name = "StringToHashBucketStrong") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["num_buckets"] = num_buckets; + dict["key"] = key; + var op = _op_def_lib._apply_op_helper("StringToHashBucketStrong", name: name, keywords: dict); + return op.output; + } + + /// + /// Converts each string in the input Tensor to the specified numeric type. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToNumber'. + /// + /// + /// The numeric type to interpret each string in string_tensor as. + /// + /// + /// A Tensor of the same shape as the input string_tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// (Note that int32 overflow results in an error while float overflow + /// results in a rounded value.) + /// + public static Tensor string_to_number (Tensor string_tensor, TF_DataType? out_type = null, string name = "StringToNumber") + { + var dict = new Dictionary(); + dict["string_tensor"] = string_tensor; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("StringToNumber", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns x - y element-wise. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sub'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// *NOTE*: Subtract supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor sub (Tensor x, Tensor y, string name = "Sub") + { + var dict = new Dictionary(); + dict["x"] = x; + dict["y"] = y; + var op = _op_def_lib._apply_op_helper("Sub", name: name, keywords: dict); + return op.output; + } + + /// + /// Return substrings from Tensor of strings. + /// + /// + /// Tensor of strings + /// + /// + /// Scalar defining the position of first character in each substring + /// + /// + /// Scalar defining the number of characters to include in each substring + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Substr'. + /// + /// + /// Tensor of substrings + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
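+ ///
+ /// In this binding a call might look like (sketch; pos and len are hypothetical
+ /// int tensors broadcast-compatible with input):
+ /// var tail = gen_ops.substr(input, pos, len);
+ ///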
+ /// + /// + /// For each string in the input Tensor, creates a substring starting at index + /// pos with a total length of len. + /// + /// If len defines a substring that would extend beyond the length of the input + /// string, then as many characters as possible are used. + /// + /// A negative pos indicates distance within the string backwards from the end. + /// + /// If pos specifies an index which is out of range for any of the input strings, + /// then an InvalidArgumentError is thrown. + /// + /// pos and len must have the same shape, otherwise a ValueError is thrown on + /// Op creation. + /// + /// *NOTE*: Substr supports broadcasting up to two dimensions. More about + /// broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// --- + /// + /// Examples + /// + /// Using scalar pos and len: + /// + /// + /// input = [b'Hello', b'World'] + /// position = 1 + /// length = 3 + /// + /// output = [b'ell', b'orl'] + /// + /// + /// Using pos and len with same shape as input: + /// + /// + /// input = [[b'ten', b'eleven', b'twelve'], + /// [b'thirteen', b'fourteen', b'fifteen'], + /// [b'sixteen', b'seventeen', b'eighteen']] + /// position = [[1, 2, 3], + /// [1, 2, 3], + /// [1, 2, 3]] + /// length = [[2, 3, 4], + /// [4, 3, 2], + /// [5, 5, 5]] + /// + /// output = [[b'en', b'eve', b'lve'], + /// [b'hirt', b'urt', b'te'], + /// [b'ixtee', b'vente', b'hteen']] + /// + /// + /// Broadcasting pos and len onto input: + /// + /// + /// input = [[b'ten', b'eleven', b'twelve'], + /// [b'thirteen', b'fourteen', b'fifteen'], + /// [b'sixteen', b'seventeen', b'eighteen'], + /// [b'nineteen', b'twenty', b'twentyone']] + /// position = [1, 2, 3] + /// length = [1, 2, 3] + /// + /// output = [[b'e', b'ev', b'lve'], + /// [b'h', b'ur', b'tee'], + /// [b'i', b've', b'hte'], + /// [b'i', b'en', b'nty']] + /// + /// + /// Broadcasting input onto pos and len: + /// + /// + /// input = b'thirteen' + /// position = [1, 5, 7] + /// length = [3, 2, 1] + /// + /// output = [b'hir', b'ee', b'n'] + /// + /// + public static Tensor substr (Tensor input, Tensor pos, Tensor len, string name = "Substr") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["pos"] = pos; + dict["len"] = len; + var op = _op_def_lib._apply_op_helper("Substr", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the sum of elements across dimensions of a tensor. + /// + /// + /// The tensor to reduce. + /// + /// + /// The dimensions to reduce. Must be in the range + /// [-rank(input), rank(input)). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sum'. + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// The reduced tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Reduces input along the dimensions given in axis. Unless + /// keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + /// axis. If keep_dims is true, the reduced dimensions are + /// retained with length 1. + /// + public static Tensor sum (Tensor input, Tensor reduction_indices, bool? 
keep_dims = null, string name = "Sum")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ dict["reduction_indices"] = reduction_indices;
+ if (keep_dims.HasValue)
+ dict["keep_dims"] = keep_dims.Value;
+ var op = _op_def_lib._apply_op_helper("Sum", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Computes the singular value decompositions of one or more matrices.
+ ///
+ ///
+ /// A tensor of shape [..., M, N] whose inner-most 2 dimensions
+ /// form matrices of size [M, N]. Let P be the minimum of M and N.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Svd'.
+ ///
+ ///
+ /// If true, left and right singular vectors will be
+ /// computed and returned in u and v, respectively.
+ /// If false, u and v are not set and should never be referenced.
+ ///
+ ///
+ /// If true, compute full-sized u and v. If false
+ /// (the default), compute only the leading P singular vectors.
+ /// Ignored if compute_uv is False.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// s : Singular values. Shape is [..., P].
+ /// u : Left singular vectors. If full_matrices is False then shape is
+ /// [..., M, P]; if full_matrices is True then shape is
+ /// [..., M, M]. Undefined if compute_uv is False.
+ /// v : Right singular vectors. If full_matrices is False then shape is
+ /// [..., N, P]. If full_matrices is True then shape is [..., N, N].
+ /// Undefined if compute_uv is false.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// Computes the SVD of each inner matrix in input such that
+ /// input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])
+ ///
+ ///
+ /// # a is a tensor containing a batch of matrices.
+ /// # s is a tensor of singular values for each matrix.
+ /// # u is the tensor containing the left singular vectors for each matrix.
+ /// # v is the tensor containing the right singular vectors for each matrix.
+ /// s, u, v = svd(a)
+ /// s, _, _ = svd(a, compute_uv=False)
+ ///
+ ///
+ public static (Tensor s, Tensor u, Tensor v) svd (Tensor input, bool? compute_uv = null, bool? full_matrices = null, string name = "Svd")
+ {
+ var dict = new Dictionary<string, object>();
+ dict["input"] = input;
+ if (compute_uv.HasValue)
+ dict["compute_uv"] = compute_uv.Value;
+ if (full_matrices.HasValue)
+ dict["full_matrices"] = full_matrices.Value;
+ var op = _op_def_lib._apply_op_helper("Svd", name: name, keywords: dict);
+ int _idx = 0;
+ var s = op.outputs[_idx++];
+ var u = op.outputs[_idx++];
+ var v = op.outputs[_idx++];
+ return (s, u, v);
+ }
+
+ ///
+ /// Forwards data to the output port determined by pred.
+ ///
+ ///
+ /// The tensor to be forwarded to the appropriate output.
+ ///
+ ///
+ /// A scalar that specifies which output port will receive data.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Switch'.
+ ///
+ ///
+ /// Returns a tuple with multiple values, as follows:
+ /// output_false : If pred is false, data will be forwarded to this output.
+ /// output_true : If pred is true, data will be forwarded to this output.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ ///
+ ///
+ /// If pred is true, the data input is forwarded to output_true. Otherwise,
+ /// the data goes to output_false.
+ ///
+ /// See also RefSwitch and Merge.
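+ ///
+ /// Sketch of conditional routing (pred is a hypothetical scalar bool tensor):
+ ///
+ /// var (whenFalse, whenTrue) = gen_ops.switch_(data, pred);
+ ///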
+ /// + public static (Tensor output_false, Tensor output_true) switch_ (Tensor data, Tensor pred, string name = "Switch") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["pred"] = pred; + var op = _op_def_lib._apply_op_helper("Switch", name: name, keywords: dict); + int _idx = 0; + var output_false = op.outputs[_idx++]; + var output_true = op.outputs[_idx++]; + return (output_false, output_true); + } + + /// + /// Creates a dataset that emits the records from one or more TFRecord files. + /// + /// + /// A scalar or vector containing the name(s) of the file(s) to be + /// read. + /// + /// + /// A scalar containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + /// + /// + /// A scalar representing the number of bytes to buffer. A value of + /// 0 means no buffering will be performed. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordDataset'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor t_f_record_dataset (Tensor filenames, Tensor compression_type, Tensor buffer_size, string name = "TFRecordDataset") + { + var dict = new Dictionary(); + dict["filenames"] = filenames; + dict["compression_type"] = compression_type; + dict["buffer_size"] = buffer_size; + var op = _op_def_lib._apply_op_helper("TFRecordDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the records from a TensorFlow Records file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordReader'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor t_f_record_reader (string container = null, string shared_name = null, string compression_type = null, string name = "TFRecordReader") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (compression_type != null) + dict["compression_type"] = compression_type; + var op = _op_def_lib._apply_op_helper("TFRecordReader", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the records from a TensorFlow Records file. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordReaderV2'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
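+ /// + /// A minimal sketch (hedged; the returned handle would normally feed a reader-read op elsewhere in the graph): + /// + /// var reader = gen_ops.t_f_record_reader_v2(compression_type: "ZLIB"); + ///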
+ /// + public static Tensor t_f_record_reader_v2 (string container = null, string shared_name = null, string compression_type = null, string name = "TFRecordReaderV2") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + if (compression_type != null) + dict["compression_type"] = compression_type; + var op = _op_def_lib._apply_op_helper("TFRecordReaderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// An op enabling differentiation of TPU Embeddings. + /// + /// + /// A trainable variable, enabling optimizers to find this op. + /// + /// + /// The embedding activations Tensor to return. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingActivations'. + /// + /// + /// Optional argument + /// The id of the table in the embedding layer configuration from which + /// these activations were computed. + /// + /// + /// Optional argument + /// Identifier of the set of embedding indices which produced these + /// activations. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op simply returns its first input, which is assumed to have been sliced + /// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of this + /// op, and its first argument being a trainable Variable, enables automatic + /// differentiation of graphs containing embeddings via the TPU Embedding Python + /// libraries. + /// + public static Tensor t_p_u_embedding_activations (Tensor embedding_variable, Tensor sliced_activations, int table_id, int lookup_id, string name = "TPUEmbeddingActivations") + { + var dict = new Dictionary(); + dict["embedding_variable"] = embedding_variable; + dict["sliced_activations"] = sliced_activations; + dict["table_id"] = table_id; + dict["lookup_id"] = lookup_id; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingActivations", name: name, keywords: dict); + return op.output; + } + + /// + /// An op that feeds a batch of embedding indices and weights to the TPU. + /// + /// + /// A list of rank 1 Tensors specifying row indices of the COO + /// sparse matrix representing the embedding lookups for each table. + /// + /// + /// A list of rank 1 Tensors specifying column indices of the + /// COO sparse matrix representing the embedding lookups for each table. + /// + /// + /// A list of rank 1 Tensors specifying the nonzero values + /// of the COO sparse matrix representing the embedding lookups for each table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingEnqueueSparseBatch'. + /// + /// + /// The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// + /// Returns the description of the operation + /// + /// + /// Embedding lookups are equivalent to sparse-dense matrix multiplications: the + /// sparse matrix contains nonzeros in column j in order to retrieve row j from the + /// embedding table. + /// + /// The three Tensor list arguments (sample_indices, embedding_indices, and + /// aggregation_weights) represent these sparse matrices in COO format. The Tensor + /// lists each have one entry for each embedding table specified in the model. 
+ /// For the kth embedding table, the three Tensors at position k in the list + /// specify a COO-format sparse matrix. For the kth table, the row indices, + /// column indices, and nonzero values of the COO sparse matrix are specified by + /// sample_indices[k], embedding_indices[k], and aggregation_weights[k], + /// respectively. Entries must be sorted by row index, then by column index. + /// + /// There should be at most one TPUEmbeddingEnqueueSparseBatch op in a single + /// training step per TPU shard. + /// + public static Operation t_p_u_embedding_enqueue_sparse_batch (Tensor[] sample_indices, Tensor[] embedding_indices, Tensor[] aggregation_weights, int? device_ordinal = null, string name = "TPUEmbeddingEnqueueSparseBatch") + { + var dict = new Dictionary(); + dict["sample_indices"] = sample_indices; + dict["embedding_indices"] = embedding_indices; + dict["aggregation_weights"] = aggregation_weights; + if (device_ordinal.HasValue) + dict["device_ordinal"] = device_ordinal.Value; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingEnqueueSparseBatch", name: name, keywords: dict); + return op; + } + + /// + /// Load an embedding table shard into TensorNode memories for use with Adagrad. + /// + /// + /// The shard of the embedding table resident on the host executing this + /// op. For single-TPU models, this is the entire embedding table. + /// + /// + /// Shard of the Adagrad accumulators resident on the host executing + /// this op. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingLoadAdagradParameters'. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// Optional argument + /// The id of the table specified in the embedding_config. + /// + /// + /// Optional argument + /// The number of CPU hosts in the distributed training job. + /// + /// + /// Optional argument + /// Which CPU host in the distributed training job will execute this op. + /// + /// + /// Returns the description of the operation + /// + /// + /// TPU embeddings use dedicated per-optimizer Ops for loading and retrieving + /// trainable variables and optimizer state from TPU memory. This op enables + /// functionality equivalent to AdagradOptimizer. + /// + public static Operation t_p_u_embedding_load_adagrad_parameters (Tensor parameters, Tensor accumulators, string tpu_embedding_config, int table_id, int num_hosts, int host_id, string name = "TPUEmbeddingLoadAdagradParameters") + { + var dict = new Dictionary(); + dict["parameters"] = parameters; + dict["accumulators"] = accumulators; + dict["tpu_embedding_config"] = tpu_embedding_config; + dict["table_id"] = table_id; + dict["num_hosts"] = num_hosts; + dict["host_id"] = host_id; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingLoadAdagradParameters", name: name, keywords: dict); + return op; + } + + /// + /// Load an embedding table shard into TPU memory for use with GradientDescent. + /// + /// + /// The shard of the embedding table resident on the host executing this + /// op. For single-TPU models, this is the entire embedding table. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingLoadGradientDescentParameters'. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// Optional argument + /// The id of the table specified in the tpu_embedding_config. 
+ /// + /// + /// Optional argument + /// The number of CPU hosts in the distributed training job. + /// + /// + /// Optional argument + /// Which CPU host in the distributed training job will execute this op. + /// + /// + /// Returns the description of the operation + /// + /// + /// TPU embeddings use dedicated per-optimizer Ops for loading and retrieving + /// trainable variables and optimizer state from TPU memory. This op enables + /// functionality equivalent to GradientDescentOptimizer. + /// + public static Operation t_p_u_embedding_load_gradient_descent_parameters (Tensor parameters, string tpu_embedding_config, int table_id, int num_hosts, int host_id, string name = "TPUEmbeddingLoadGradientDescentParameters") + { + var dict = new Dictionary(); + dict["parameters"] = parameters; + dict["tpu_embedding_config"] = tpu_embedding_config; + dict["table_id"] = table_id; + dict["num_hosts"] = num_hosts; + dict["host_id"] = host_id; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingLoadGradientDescentParameters", name: name, keywords: dict); + return op; + } + + /// + /// An op that receives embedding activations on the TPU. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingReceiveActivations'. + /// + /// + /// Optional argument + /// The number of output activation tensors, equal to the number of + /// embedding tables in the model. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// A TensorList of embedding activations containing one Tensor per + /// embedding table in the model. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The TPU system performs the embedding lookups and aggregations specified by + /// the arguments to TPUEmbeddingEnqueueSparseBatch. The results of these + /// aggregations are visible to the Tensorflow Graph as the outputs of a + /// TPUEmbeddingDequeueActivations Op. This op returns a list containing one + /// Tensor of activations per table specified in the model. There can be at most + /// one ReceiveActivations op in the TPU graph. + /// + public static Tensor[] t_p_u_embedding_receive_activations (int num_tables, string tpu_embedding_config, string name = "TPUEmbeddingReceiveActivations") + { + var dict = new Dictionary(); + dict["num_tables"] = num_tables; + dict["tpu_embedding_config"] = tpu_embedding_config; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingReceiveActivations", name: name, keywords: dict); + int _idx = 0; + var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray(); + return (outputs); + } + + /// + /// Retrieve an embedding table shard from TPU memory. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingRetrieveAdagradParameters'. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// Optional argument + /// The id of the table specified in the embedding_config_json. + /// + /// + /// Optional argument + /// The number of CPU hosts in the distributed training job. + /// + /// + /// Optional argument + /// Which CPU host in the distributed training job will execute this op. 
+ /// + /// + /// Returns a tuple with multiple values, as follows: + /// parameters : + /// accumulators : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// TPU embeddings use dedicated per-optimizer Ops for loading and retrieving + /// trainable variables and optimizer state from TPU memory. This op enables + /// functionality equivalent to AdagradOptimizer. + /// + public static (Tensor parameters, Tensor accumulators) t_p_u_embedding_retrieve_adagrad_parameters (string tpu_embedding_config, int table_id, int num_hosts, int host_id, string name = "TPUEmbeddingRetrieveAdagradParameters") + { + var dict = new Dictionary(); + dict["tpu_embedding_config"] = tpu_embedding_config; + dict["table_id"] = table_id; + dict["num_hosts"] = num_hosts; + dict["host_id"] = host_id; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingRetrieveAdagradParameters", name: name, keywords: dict); + int _idx = 0; + var parameters = op.outputs[_idx++]; + var accumulators = op.outputs[_idx++]; + return (parameters, accumulators); + } + + /// + /// Retrieve an embedding table shard from TPU memory. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingRetrieveGradientDescentParameters'. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// Optional argument + /// The id of the table specified in tpu_embedding_config. + /// + /// + /// Optional argument + /// The number of CPU hosts in the distributed training job. + /// + /// + /// Optional argument + /// Which CPU host in the distributed training job will execute this op. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// TPU embeddings use dedicated per-optimizer Ops for loading and retrieving + /// trainable variables and optimizer state from TPU memory. This op enables + /// functionality equivalent to GradientDescentOptimizer. + /// + public static Tensor t_p_u_embedding_retrieve_gradient_descent_parameters (string tpu_embedding_config, int table_id, int num_hosts, int host_id, string name = "TPUEmbeddingRetrieveGradientDescentParameters") + { + var dict = new Dictionary(); + dict["tpu_embedding_config"] = tpu_embedding_config; + dict["table_id"] = table_id; + dict["num_hosts"] = num_hosts; + dict["host_id"] = host_id; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingRetrieveGradientDescentParameters", name: name, keywords: dict); + return op.output; + } + + /// + /// An op that performs gradient updates of embedding tables. + /// + /// + /// A TensorList of gradients with which to update embedding tables. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingSendGradients'. + /// + /// + /// Optional argument + /// Serialized TPUEmbeddingConfiguration proto. + /// + /// + /// Returns the description of the operation + /// + /// + /// The TensorList argument has the same length and shapes as the return value of + /// TPUEmbeddingReceiveActivations, but contains gradients of the model's loss + /// with respect to the embedding activations. The embedding tables are updated + /// from these gradients via the optimizer specified in the configuration given + /// to tpu.initialize_system. 
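+ /// + /// A hedged sketch of the receive/send pairing described above (num_tables, config, and gradients are illustrative placeholders): + /// + /// var activations = gen_ops.t_p_u_embedding_receive_activations(num_tables, config); + /// // ... compute per-table gradients of the loss w.r.t. the activations ... + /// var send = gen_ops.t_p_u_embedding_send_gradients(gradients, config); + ///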
+ /// + public static Operation t_p_u_embedding_send_gradients (Tensor[] gradients, string tpu_embedding_config, string name = "TPUEmbeddingSendGradients") + { + var dict = new Dictionary(); + dict["gradients"] = gradients; + dict["tpu_embedding_config"] = tpu_embedding_config; + var op = _op_def_lib._apply_op_helper("TPUEmbeddingSendGradients", name: name, keywords: dict); + return op; + } + + /// + /// Operator that connects N unreplicated inputs to an N-way replicated TPU computation. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUReplicatedInput'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor t_p_u_replicated_input (Tensor[] inputs, string name = "TPUReplicatedInput") + { + var dict = new Dictionary(); + dict["inputs"] = inputs; + var op = _op_def_lib._apply_op_helper("TPUReplicatedInput", name: name, keywords: dict); + return op.output; + } + + /// + /// Operator that connects the output of an N-way replicated TPU computation to N separate outputs. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUReplicatedOutput'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor[] t_p_u_replicated_output (Tensor input, int num_replicas, string name = "TPUReplicatedOutput") + { + var dict = new Dictionary(); + dict["input"] = input; + dict["num_replicas"] = num_replicas; + var op = _op_def_lib._apply_op_helper("TPUReplicatedOutput", name: name, keywords: dict); + int _idx = 0; + var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray(); + return (outputs); + } + + /// + /// Creates a dataset that contains count elements from the input_dataset. + /// + /// + /// + /// + /// A scalar representing the number of elements from the input_dataset + /// that should be taken. A value of -1 indicates that all of input_dataset + /// is taken. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TakeDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor take_dataset (Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "TakeDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["count"] = count; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("TakeDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Read SparseTensors from a SparseTensorsMap and concatenate them. + /// + /// + /// 1-D, The N serialized SparseTensor objects. + /// Shape: [N]. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TakeManySparseFromTensorsMap'. + /// + /// + /// Optional argument + /// The dtype of the SparseTensor objects stored in the + /// SparseTensorsMap. + /// + /// + /// The container name for the SparseTensorsMap read by this op. 
+ /// + /// + /// The shared name for the SparseTensorsMap read by this op. + /// It should not be blank; rather the shared_name or unique Operation name + /// of the Op that created the original SparseTensorsMap should be used. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// sparse_indices : 2-D. The indices of the minibatch SparseTensor. + /// sparse_values : 1-D. The values of the minibatch SparseTensor. + /// sparse_shape : 1-D. The shape of the minibatch SparseTensor. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// The input sparse_handles must be an int64 matrix of shape [N, 1] where + /// N is the minibatch size and the rows correspond to the output handles of + /// AddSparseToTensorsMap or AddManySparseToTensorsMap. The ranks of the + /// original SparseTensor objects that went into the given input ops must all + /// match. When the final SparseTensor is created, it has rank one + /// higher than the ranks of the incoming SparseTensor objects + /// (they have been concatenated along a new row dimension on the left). + /// + /// The output SparseTensor object's shape values for all dimensions but the + /// first are the max across the input SparseTensor objects' shape values + /// for the corresponding dimensions. Its first shape value is N, the minibatch + /// size. + /// + /// The input SparseTensor objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run SparseReorder to restore index ordering. + /// + /// For example, if the handles represent an input, which is a [2, 3] matrix + /// representing two original SparseTensor objects: + /// + /// + /// index = [ 0] + /// [10] + /// [20] + /// values = [1, 2, 3] + /// shape = [50] + /// + /// + /// and + /// + /// + /// index = [ 2] + /// [10] + /// values = [4, 5] + /// shape = [30] + /// + /// + /// then the final SparseTensor will be: + /// + /// + /// index = [0 0] + /// [0 10] + /// [0 20] + /// [1 2] + /// [1 10] + /// values = [1, 2, 3, 4, 5] + /// shape = [2 50] + /// + /// + public static (Tensor sparse_indices, Tensor sparse_values, Tensor sparse_shape) take_many_sparse_from_tensors_map (Tensor sparse_handles, TF_DataType dtype, string container = null, string shared_name = null, string name = "TakeManySparseFromTensorsMap") + { + var dict = new Dictionary(); + dict["sparse_handles"] = sparse_handles; + dict["dtype"] = dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("TakeManySparseFromTensorsMap", name: name, keywords: dict); + int _idx = 0; + var sparse_indices = op.outputs[_idx++]; + var sparse_values = op.outputs[_idx++]; + var sparse_shape = op.outputs[_idx++]; + return (sparse_indices, sparse_values, sparse_shape); + } + + /// + /// Computes tan of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tan'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tan (Tensor x, string name = "Tan") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Tan", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes hyperbolic tangent of x element-wise. 
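+ /// For example, a minimal call sketch (hedged; x is assumed to be a floating-point Tensor): + /// + /// var y = gen_ops.tanh(x); // element values lie in (-1, 1) + ///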
+ /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tanh'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tanh (Tensor x, string name = "Tanh") + { + var dict = new Dictionary(); + dict["x"] = x; + var op = _op_def_lib._apply_op_helper("Tanh", name: name, keywords: dict); + return op.output; + } + + /// + /// Computes the gradient for the tanh of x wrt its input. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TanhGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = dy * (1 - y*y), where y = tanh(x), and dy + /// is the corresponding input gradient. + /// + public static Tensor tanh_grad (Tensor y, Tensor dy, string name = "TanhGrad") + { + var dict = new Dictionary(); + dict["y"] = y; + dict["dy"] = dy; + var op = _op_def_lib._apply_op_helper("TanhGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns a tensor that may be mutated, but only persists within a single step. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TemporaryVariable'. + /// + /// + /// Optional argument + /// The shape of the variable tensor. + /// + /// + /// Optional argument + /// The type of elements in the variable tensor. + /// + /// + /// Overrides the name used for the temporary variable resource. Default + /// value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + /// + /// + /// A reference to the variable tensor. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This is an experimental op for internal use only and it is possible to use this + /// op in unsafe ways. DO NOT USE unless you fully understand the risks. + /// + /// It is the caller's responsibility to ensure that 'ref' is eventually passed to a + /// matching 'DestroyTemporaryVariable' op after all other uses have completed. + /// + /// Outputs a ref to the tensor state so it may be read or modified. + /// + /// E.g. + /// var = state_ops._temporary_variable([1, 2], types.float_) + /// var_name = var.op.name + /// var = state_ops.assign(var, [[4.0, 5.0]]) + /// var = state_ops.assign_add(var, [[6.0, 7.0]]) + /// final = state_ops._destroy_temporary_variable(var, var_name=var_name) + /// + public static Tensor temporary_variable (TensorShape shape, TF_DataType dtype, string var_name = null, string name = "TemporaryVariable") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["dtype"] = dtype; + if (var_name != null) + dict["var_name"] = var_name; + var op = _op_def_lib._apply_op_helper("TemporaryVariable", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArrayCloseV3 + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayCloseV2'. 
+ /// + /// + /// Returns the description of the operation + /// + public static Operation tensor_array_close_v2 (Tensor handle, string name = "TensorArrayCloseV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("TensorArrayCloseV2", name: name, keywords: dict); + return op; + } + + /// + /// Delete the TensorArray from its resource container. + /// + /// + /// The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayCloseV3'. + /// + /// + /// Returns the description of the operation + /// + /// + /// This enables the user to close and release the resource in the middle + /// of a step/run. + /// + public static Operation tensor_array_close_v3 (Tensor handle, string name = "TensorArrayCloseV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + var op = _op_def_lib._apply_op_helper("TensorArrayCloseV3", name: name, keywords: dict); + return op; + } + + /// + /// Deprecated. Use TensorArrayConcatV3 + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayConcatV2'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// Returns a tuple with multiple values, as follows: + /// value : + /// lengths : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + public static (Tensor value, Tensor lengths) tensor_array_concat_v2 (Tensor handle, Tensor flow_in, TF_DataType dtype, TensorShape element_shape_except0 = null, string name = "TensorArrayConcatV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + if (element_shape_except0 != null) + dict["element_shape_except0"] = element_shape_except0; + var op = _op_def_lib._apply_op_helper("TensorArrayConcatV2", name: name, keywords: dict); + int _idx = 0; + var value = op.outputs[_idx++]; + var lengths = op.outputs[_idx++]; + return (value, lengths); + } + + /// + /// Concat the elements from the TensorArray into output value. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayConcatV3'. + /// + /// + /// Optional argument + /// The type of the elem that is returned. + /// + /// + /// The expected shape of an element, if known, + /// excluding the first dimension. Used to validate the shapes of + /// TensorArray elements. If this shape is not fully specified, concatenating + /// zero-size TensorArrays is an error. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// value : All of the elements in the TensorArray, concatenated along the first + /// axis. + /// lengths : A vector of the row sizes of the original T elements in the + /// value output. In the example above, this would be the values: + /// (n1, n2, ..., n(T-1)). + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Takes T elements of shapes + /// + /// + /// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + /// + /// + /// and concatenates them into a Tensor of shape: + /// + /// + /// (n0 + n1 + ... 
+ n(T-1) x d0 x d1 x ...) + /// + /// + /// All elements must have the same shape (excepting the first dimension). + /// + public static (Tensor value, Tensor lengths) tensor_array_concat_v3 (Tensor handle, Tensor flow_in, TF_DataType dtype, TensorShape element_shape_except0 = null, string name = "TensorArrayConcatV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + if (element_shape_except0 != null) + dict["element_shape_except0"] = element_shape_except0; + var op = _op_def_lib._apply_op_helper("TensorArrayConcatV3", name: name, keywords: dict); + int _idx = 0; + var value = op.outputs[_idx++]; + var lengths = op.outputs[_idx++]; + return (value, lengths); + } + + /// + /// Deprecated. Use TensorArrayGatherV3 + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGatherV2'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_gather_v2 (Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, TensorShape element_shape = null, string name = "TensorArrayGatherV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["indices"] = indices; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + if (element_shape != null) + dict["element_shape"] = element_shape; + var op = _op_def_lib._apply_op_helper("TensorArrayGatherV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Gather specific elements from the TensorArray into output value. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// The locations in the TensorArray from which to read tensor elements. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGatherV3'. + /// + /// + /// Optional argument + /// The type of the elem that is returned. + /// + /// + /// The expected shape of an element, if known. Used to + /// validate the shapes of TensorArray elements. If this shape is not + /// fully specified, gathering zero-size TensorArrays is an error. + /// + /// + /// All of the elements in the TensorArray, concatenated along a new + /// axis (the new dimension 0). + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// All elements selected by indices must have the same shape. + /// + public static Tensor tensor_array_gather_v3 (Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, TensorShape element_shape = null, string name = "TensorArrayGatherV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["indices"] = indices; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + if (element_shape != null) + dict["element_shape"] = element_shape; + var op = _op_def_lib._apply_op_helper("TensorArrayGatherV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArrayGradV3 + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradV2'. 
+ /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_grad_v2 (Tensor handle, Tensor flow_in, string source, string name = "TensorArrayGradV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + dict["source"] = source; + var op = _op_def_lib._apply_op_helper("TensorArrayGradV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a TensorArray for storing the gradients of values in the given handle. + /// + /// + /// The handle to the forward TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradV3'. + /// + /// + /// Optional argument + /// The gradient source string, used to decide which gradient TensorArray + /// to return. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// grad_handle : + /// flow_out : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// If the given TensorArray gradient already exists, returns a reference to it. + /// + /// Locks the size of the original TensorArray by disabling its dynamic size flag. + /// + /// **A note about the input flow_in:** + /// + /// The handle flow_in forces the execution of the gradient lookup to occur + /// only after certain other operations have occurred. For example, when + /// the forward TensorArray is dynamically sized, writes to this TensorArray + /// may resize the object. The gradient TensorArray is statically sized based + /// on the size of the forward TensorArray when this operation executes. + /// Furthermore, the size of the forward TensorArray is frozen by this call. + /// As a result, the flow is used to ensure that the call to generate the gradient + /// TensorArray only happens after all writes are executed. + /// + /// In the case of dynamically sized TensorArrays, gradient computation should + /// only be performed on read operations that have themselves been chained via + /// flow to occur only after all writes have executed. That way the final size + /// of the forward TensorArray is known when this operation is called. + /// + /// **A note about the source attribute:** + /// + /// TensorArray gradient calls use an accumulator TensorArray object. If + /// multiple gradients are calculated and run in the same session, the multiple + /// gradient nodes may accidentally flow through the same accumulator TensorArray. + /// This double counts and generally breaks the TensorArray gradient flow. + /// + /// The solution is to identify which gradient call this particular + /// TensorArray gradient is being called in. This is performed by identifying + /// a unique string (e.g. "gradients", "gradients_1", ...) from the input + /// gradient Tensor's name. This string is used as a suffix when creating + /// the TensorArray gradient object here (the attribute source). + /// + /// The attribute source is added as a suffix to the forward TensorArray's + /// name when performing the creation / lookup, so that each separate gradient + /// calculation gets its own TensorArray accumulator. 
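+ /// + /// A short sketch of the source convention described above (hedged; handle and flow are assumed to come from an earlier tensor_array_v3 call): + /// + /// var (grad_handle, flow_out) = gen_ops.tensor_array_grad_v3(handle, flow, source: "gradients"); + ///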
+ /// + public static (Tensor grad_handle, Tensor flow_out) tensor_array_grad_v3 (Tensor handle, Tensor flow_in, string source, string name = "TensorArrayGradV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + dict["source"] = source; + var op = _op_def_lib._apply_op_helper("TensorArrayGradV3", name: name, keywords: dict); + int _idx = 0; + var grad_handle = op.outputs[_idx++]; + var flow_out = op.outputs[_idx++]; + return (grad_handle, flow_out); + } + + /// + /// Creates a TensorArray for storing multiple gradients of values in the given handle. + /// + /// + /// The handle to the forward TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// An int32 vector representing a shape. Elements in the gradient accumulator will + /// have shape which is this shape_to_prepend value concatenated with shape of the + /// elements in the TensorArray corresponding to the input handle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradWithShape'. + /// + /// + /// Optional argument + /// The gradient source string, used to decide which gradient TensorArray + /// to return. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// grad_handle : + /// flow_out : + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Similar to TensorArrayGradV3. However it creates an accumulator with an + /// expanded shape compared to the input TensorArray whose gradient is being + /// computed. This enables multiple gradients for the same TensorArray to be + /// calculated using the same accumulator. + /// + public static (Tensor grad_handle, Tensor flow_out) tensor_array_grad_with_shape (Tensor handle, Tensor flow_in, Tensor shape_to_prepend, string source, string name = "TensorArrayGradWithShape") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + dict["shape_to_prepend"] = shape_to_prepend; + dict["source"] = source; + var op = _op_def_lib._apply_op_helper("TensorArrayGradWithShape", name: name, keywords: dict); + int _idx = 0; + var grad_handle = op.outputs[_idx++]; + var flow_out = op.outputs[_idx++]; + return (grad_handle, flow_out); + } + + /// + /// Deprecated. Use TensorArrayReadV3 + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayReadV2'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_read_v2 (Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = "TensorArrayReadV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["index"] = index; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("TensorArrayReadV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Read an element from the TensorArray into output value. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayReadV3'. 
+ /// + /// + /// Optional argument + /// The type of the elem that is returned. + /// + /// + /// The tensor that is read from the TensorArray. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_read_v3 (Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = "TensorArrayReadV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["index"] = index; + dict["flow_in"] = flow_in; + dict["dtype"] = dtype; + var op = _op_def_lib._apply_op_helper("TensorArrayReadV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArrayScatterV3 + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayScatterV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_scatter_v2 (Tensor handle, Tensor indices, Tensor value, Tensor flow_in, string name = "TensorArrayScatterV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["indices"] = indices; + dict["value"] = value; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArrayScatterV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Scatter the data from the input value into specific TensorArray elements. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// The locations at which to write the tensor elements. + /// + /// + /// The concatenated tensor to write to the TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayScatterV3'. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// indices must be a vector, its length must match the first dim of value. + /// + public static Tensor tensor_array_scatter_v3 (Tensor handle, Tensor indices, Tensor value, Tensor flow_in, string name = "TensorArrayScatterV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["indices"] = indices; + dict["value"] = value; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArrayScatterV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArraySizeV3 + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySizeV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_size_v2 (Tensor handle, Tensor flow_in, string name = "TensorArraySizeV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArraySizeV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Get the current size of the TensorArray. + /// + /// + /// The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + /// + /// + /// A float scalar that enforces proper chaining of operations. 
+ /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySizeV3'. + /// + /// + /// The current size of the TensorArray. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_size_v3 (Tensor handle, Tensor flow_in, string name = "TensorArraySizeV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArraySizeV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArraySplitV3 + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySplitV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_split_v2 (Tensor handle, Tensor value, Tensor lengths, Tensor flow_in, string name = "TensorArraySplitV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["value"] = value; + dict["lengths"] = lengths; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArraySplitV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Split the data from the input value into TensorArray elements. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// The concatenated tensor to write to the TensorArray. + /// + /// + /// The vector of lengths, how to split the rows of value into the + /// TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySplitV3'. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Assuming that lengths takes on values + /// + /// + /// (n0, n1, ..., n(T-1)) + /// + /// + /// and that value has shape + /// + /// + /// (n0 + n1 + ... + n(T-1) x d0 x d1 x ...), + /// + /// this splits values into a TensorArray with T tensors. + /// + /// TensorArray index t will be the subtensor of values with starting position + /// + /// + /// (n0 + n1 + ... + n(t-1), 0, 0, ...) + /// + /// + /// and having size + /// + /// + /// nt x d0 x d1 x ... + /// + /// + public static Tensor tensor_array_split_v3 (Tensor handle, Tensor value, Tensor lengths, Tensor flow_in, string name = "TensorArraySplitV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["value"] = value; + dict["lengths"] = lengths; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArraySplitV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Deprecated. Use TensorArrayV3 + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayV2'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_v2 (Tensor size, TF_DataType dtype, TensorShape element_shape = null, bool? dynamic_size = null, bool? 
clear_after_read = null, string tensor_array_name = null, string name = "TensorArrayV2") + { + var dict = new Dictionary(); + dict["size"] = size; + dict["dtype"] = dtype; + if (element_shape != null) + dict["element_shape"] = element_shape; + if (dynamic_size.HasValue) + dict["dynamic_size"] = dynamic_size.Value; + if (clear_after_read.HasValue) + dict["clear_after_read"] = clear_after_read.Value; + if (tensor_array_name != null) + dict["tensor_array_name"] = tensor_array_name; + var op = _op_def_lib._apply_op_helper("TensorArrayV2", name: name, keywords: dict); + return op.output; + } + + /// + /// An array of Tensors of given size. + /// + /// + /// The size of the array. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayV3'. + /// + /// + /// Optional argument + /// The type of the elements on the tensor_array. + /// + /// + /// The expected shape of an element, if known. Used to + /// validate the shapes of TensorArray elements. If this shape is not + /// fully specified, gathering zero-size TensorArrays is an error. + /// + /// + /// A boolean that determines whether writes to the TensorArray + /// are allowed to grow the size. By default, this is not allowed. + /// + /// + /// If true (default), Tensors in the TensorArray are cleared + /// after being read. This disables multiple read semantics but allows early + /// release of memory. + /// + /// + /// If true (default is false), then all + /// elements in the TensorArray will be expected to have identical shapes. + /// This allows certain behaviors, like dynamically checking for + /// consistent shapes on write, and being able to fill in properly + /// shaped zero tensors on stack -- even if the element_shape attribute + /// is not fully defined. + /// + /// + /// Overrides the name used for the temporary tensor_array + /// resource. Default value is the name of the 'TensorArray' op (which + /// is guaranteed unique). + /// + /// + /// Returns a tuple with multiple values, as follows: + /// handle : The handle to the TensorArray. + /// flow : A scalar used to control gradient flow. + /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property. + /// + /// + /// Write data via Write and read via Read or Pack. + /// + public static (Tensor handle, Tensor flow) tensor_array_v3 (Tensor size, TF_DataType dtype, TensorShape element_shape = null, bool? dynamic_size = null, bool? clear_after_read = null, bool? identical_element_shapes = null, string tensor_array_name = null, string name = "TensorArrayV3") + { + var dict = new Dictionary(); + dict["size"] = size; + dict["dtype"] = dtype; + if (element_shape != null) + dict["element_shape"] = element_shape; + if (dynamic_size.HasValue) + dict["dynamic_size"] = dynamic_size.Value; + if (clear_after_read.HasValue) + dict["clear_after_read"] = clear_after_read.Value; + if (identical_element_shapes.HasValue) + dict["identical_element_shapes"] = identical_element_shapes.Value; + if (tensor_array_name != null) + dict["tensor_array_name"] = tensor_array_name; + var op = _op_def_lib._apply_op_helper("TensorArrayV3", name: name, keywords: dict); + int _idx = 0; + var handle = op.outputs[_idx++]; + var flow = op.outputs[_idx++]; + return (handle, flow); + } + + /// + /// Deprecated. 
Use TensorArrayWriteV3 + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayWriteV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_write_v2 (Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = "TensorArrayWriteV2") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["index"] = index; + dict["value"] = value; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArrayWriteV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Push an element onto the tensor_array. + /// + /// + /// The handle to a TensorArray. + /// + /// + /// The position to write to inside the TensorArray. + /// + /// + /// The tensor to write to the TensorArray. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayWriteV3'. + /// + /// + /// A float scalar that enforces proper chaining of operations. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_array_write_v3 (Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = "TensorArrayWriteV3") + { + var dict = new Dictionary(); + dict["handle"] = handle; + dict["index"] = index; + dict["value"] = value; + dict["flow_in"] = flow_in; + var op = _op_def_lib._apply_op_helper("TensorArrayWriteV3", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that emits components as a tuple of tensors once. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorDataset'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_dataset (Tensor[] components, TensorShape[] output_shapes, string name = "TensorDataset") + { + var dict = new Dictionary(); + dict["components"] = components; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("TensorDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// The shape of the elements of the given list, as a tensor. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListElementShape'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// input_handle: the list + /// element_shape: the shape of elements of the list + /// + public static Tensor tensor_list_element_shape (Tensor input_handle, TF_DataType shape_type, string name = "TensorListElementShape") + { + var dict = new Dictionary(); + dict["input_handle"] = input_handle; + dict["shape_type"] = shape_type; + var op = _op_def_lib._apply_op_helper("TensorListElementShape", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a TensorList which, when stacked, has the value of tensor. 
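+ /// For instance (a hedged sketch; tensor and element_shape are assumed pre-built Tensors): + /// + /// var list_handle = gen_ops.tensor_list_from_tensor(tensor, element_shape); + ///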
+ /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListFromTensor'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Each tensor in the result list corresponds to one row of the input tensor. + /// + /// tensor: The input tensor. + /// output_handle: The list. + /// + public static Tensor tensor_list_from_tensor (Tensor tensor, Tensor element_shape, string name = "TensorListFromTensor") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + dict["element_shape"] = element_shape; + var op = _op_def_lib._apply_op_helper("TensorListFromTensor", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a Tensor by indexing into the TensorList. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListGather'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Each row in the produced Tensor corresponds to the element in the TensorList + /// specified by the given index (see tf.gather). + /// + /// input_handle: The input tensor list. + /// indices: The indices used to index into the list. + /// values: The tensor. + /// + public static Tensor tensor_list_gather (Tensor input_handle, Tensor indices, TF_DataType element_dtype, string name = "TensorListGather") + { + var dict = new Dictionary(); + dict["input_handle"] = input_handle; + dict["indices"] = indices; + dict["element_dtype"] = element_dtype; + var op = _op_def_lib._apply_op_helper("TensorListGather", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the item in the list with the given index. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListGetItem'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// input_handle: the list + /// index: the position in the list from which an element will be retrieved + /// item: the element at that position + /// + /// + /// + public static Tensor tensor_list_get_item (Tensor input_handle, Tensor index, TF_DataType element_dtype, string name = "TensorListGetItem") + { + var dict = new Dictionary(); + dict["input_handle"] = input_handle; + dict["index"] = index; + dict["element_dtype"] = element_dtype; + var op = _op_def_lib._apply_op_helper("TensorListGetItem", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the number of tensors in the input tensor list. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListLength'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+ /// 
+ /// 
+ /// input_handle: the input list
+ /// length: the number of tensors in the list
+ /// 
+ public static Tensor tensor_list_length (Tensor input_handle, string name = "TensorListLength")
+ {
+ var dict = new Dictionary();
+ dict["input_handle"] = input_handle;
+ var op = _op_def_lib._apply_op_helper("TensorListLength", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Returns the last element of the input list as well as a list with all but that element.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListPopBack'.
+ /// 
+ /// 
+ /// Optional argument
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// output_handle : 
+ /// tensor : 
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// Fails if the list is empty.
+ /// 
+ /// input_handle: the input list
+ /// tensor: the withdrawn last element of the list
+ /// element_dtype: the type of elements in the list
+ /// element_shape: the shape of the output tensor
+ /// 
+ public static (Tensor output_handle, Tensor tensor) tensor_list_pop_back (Tensor input_handle, TF_DataType element_dtype, string name = "TensorListPopBack")
+ {
+ var dict = new Dictionary();
+ dict["input_handle"] = input_handle;
+ dict["element_dtype"] = element_dtype;
+ var op = _op_def_lib._apply_op_helper("TensorListPopBack", name: name, keywords: dict);
+ int _idx = 0;
+ var output_handle = op.outputs[_idx++];
+ var tensor = op.outputs[_idx++];
+ return (output_handle, tensor);
+ }
+ 
+ /// 
+ /// Returns a list which has the passed-in Tensor as its last element and the other elements of the given list in input_handle.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListPushBack'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// tensor: The tensor to put on the list.
+ /// input_handle: The old list.
+ /// output_handle: A list with the elements of the old list followed by tensor.
+ /// element_dtype: the type of elements in the list.
+ /// element_shape: a shape compatible with that of elements in the list.
+ /// 
+ public static Tensor tensor_list_push_back (Tensor input_handle, Tensor tensor, string name = "TensorListPushBack")
+ {
+ var dict = new Dictionary();
+ dict["input_handle"] = input_handle;
+ dict["tensor"] = tensor;
+ var op = _op_def_lib._apply_op_helper("TensorListPushBack", name: name, keywords: dict);
+ return op.output;
+ }
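+ 
+ // Illustrative usage sketch (not part of the generated bindings): given a list
+ // handle `list` and a Tensor `t`, both assumed to be produced elsewhere, a push
+ // followed by a pop should round-trip the element:
+ //
+ //   var grown = gen_ops.tensor_list_push_back(list, t);
+ //   var (rest, last) = gen_ops.tensor_list_pop_back(grown, element_dtype: t.dtype);
+ //   // When evaluated, `last` holds the value of `t` and `rest` matches `list`.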
+ 
+ /// 
+ /// List of the given size with empty elements.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListReserve'.
+ /// 
+ /// 
+ /// Optional argument
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// element_shape: the shape of the future elements of the list
+ /// num_elements: the number of elements to reserve
+ /// handle: the output list
+ /// element_dtype: the desired type of elements in the list.
+ /// 
+ public static Tensor tensor_list_reserve (Tensor element_shape, Tensor num_elements, TF_DataType element_dtype, string name = "TensorListReserve")
+ {
+ var dict = new Dictionary();
+ dict["element_shape"] = element_shape;
+ dict["num_elements"] = num_elements;
+ dict["element_dtype"] = element_dtype;
+ var op = _op_def_lib._apply_op_helper("TensorListReserve", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Creates a TensorList by indexing into a Tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListScatter'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Each member of the TensorList corresponds to one row of the input tensor,
+ /// specified by the given index (see tf.gather).
+ /// 
+ /// tensor: The input tensor.
+ /// indices: The indices used to index into the list.
+ /// element_shape: The shape of the elements in the list (can be less fully specified than
+ /// the shape of the tensor).
+ /// output_handle: The TensorList.
+ /// 
+ public static Tensor tensor_list_scatter (Tensor tensor, Tensor indices, Tensor element_shape, string name = "TensorListScatter")
+ {
+ var dict = new Dictionary();
+ dict["tensor"] = tensor;
+ dict["indices"] = indices;
+ dict["element_shape"] = element_shape;
+ var op = _op_def_lib._apply_op_helper("TensorListScatter", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Sets the index-th position of the list to contain the given tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListSetItem'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// input_handle: the list
+ /// index: the position in the list to which the tensor will be assigned
+ /// item: the element to be assigned to that position
+ /// output_handle: the new list, with the element in the proper position
+ /// 
+ /// 
+ public static Tensor tensor_list_set_item (Tensor input_handle, Tensor index, Tensor item, string name = "TensorListSetItem")
+ {
+ var dict = new Dictionary();
+ dict["input_handle"] = input_handle;
+ dict["index"] = index;
+ dict["item"] = item;
+ var op = _op_def_lib._apply_op_helper("TensorListSetItem", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Stacks all tensors in the list.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListStack'.
+ /// 
+ /// 
+ /// Optional argument
+ /// 
+ /// 
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Requires that all tensors have the same shape.
+ /// 
+ /// input_handle: the input list
+ /// tensor: the gathered result
+ /// num_elements: optional. If not -1, the number of elements in the list.
+ /// 
+ /// 
+ public static Tensor tensor_list_stack (Tensor input_handle, TF_DataType element_dtype, int?
num_elements = null, string name = "TensorListStack") + { + var dict = new Dictionary(); + dict["input_handle"] = input_handle; + dict["element_dtype"] = element_dtype; + if (num_elements.HasValue) + dict["num_elements"] = num_elements.Value; + var op = _op_def_lib._apply_op_helper("TensorListStack", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that emits each dim-0 slice of components once. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSliceDataset'. + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_slice_dataset (Tensor[] components, TensorShape[] output_shapes, string name = "TensorSliceDataset") + { + var dict = new Dictionary(); + dict["components"] = components; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("TensorSliceDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with a tensor. + /// + /// + /// A tensor to serialize. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSummary'. + /// + /// + /// A json-encoded SummaryDescription proto. + /// + /// + /// An unused list of strings. + /// + /// + /// An unused string. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This op is being phased out in favor of TensorSummaryV2, which lets callers pass + /// a tag as well as a serialized SummaryMetadata proto string that contains + /// plugin-specific data. We will keep this op to maintain backwards compatibility. + /// + public static Tensor tensor_summary (Tensor tensor, string description = null, string[] labels = null, string display_name = null, string name = "TensorSummary") + { + var dict = new Dictionary(); + dict["tensor"] = tensor; + if (description != null) + dict["description"] = description; + if (labels != null) + dict["labels"] = labels; + if (display_name != null) + dict["display_name"] = display_name; + var op = _op_def_lib._apply_op_helper("TensorSummary", name: name, keywords: dict); + return op.output; + } + + /// + /// Outputs a Summary protocol buffer with a tensor and per-plugin data. + /// + /// + /// A string attached to this summary. Used for organization in TensorBoard. + /// + /// + /// A tensor to serialize. + /// + /// + /// A serialized SummaryMetadata proto. Contains plugin + /// data. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSummaryV2'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor tensor_summary_v2 (Tensor tag, Tensor tensor, Tensor serialized_summary_metadata, string name = "TensorSummaryV2") + { + var dict = new Dictionary(); + dict["tag"] = tag; + dict["tensor"] = tensor; + dict["serialized_summary_metadata"] = serialized_summary_metadata; + var op = _op_def_lib._apply_op_helper("TensorSummaryV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Creates a dataset that emits the lines of one or more text files. + /// + /// + /// A scalar or a vector containing the name(s) of the file(s) to be + /// read. 
+ /// + /// + /// A scalar containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + /// + /// + /// A scalar containing the number of bytes to buffer. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineDataset'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor text_line_dataset (Tensor filenames, Tensor compression_type, Tensor buffer_size, string name = "TextLineDataset") + { + var dict = new Dictionary(); + dict["filenames"] = filenames; + dict["compression_type"] = compression_type; + dict["buffer_size"] = buffer_size; + var op = _op_def_lib._apply_op_helper("TextLineDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the lines of a file delimited by '\n'. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineReader'. + /// + /// + /// Number of lines to skip from the beginning of every file. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor text_line_reader (int? skip_header_lines = null, string container = null, string shared_name = null, string name = "TextLineReader") + { + var dict = new Dictionary(); + if (skip_header_lines.HasValue) + dict["skip_header_lines"] = skip_header_lines.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("TextLineReader", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the lines of a file delimited by '\n'. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineReaderV2'. + /// + /// + /// Number of lines to skip from the beginning of every file. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor text_line_reader_v2 (int? skip_header_lines = null, string container = null, string shared_name = null, string name = "TextLineReaderV2") + { + var dict = new Dictionary(); + if (skip_header_lines.HasValue) + dict["skip_header_lines"] = skip_header_lines.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("TextLineReaderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates labels for candidate sampling with a learned unigram distribution. 
+ /// 
+ /// 
+ /// A batch_size * num_true matrix, in which each row contains the
+ /// IDs of the num_true target_classes in the corresponding original label.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ThreadUnsafeUnigramCandidateSampler'.
+ /// 
+ /// 
+ /// Optional argument
+ /// Number of true labels per context.
+ /// 
+ /// 
+ /// Optional argument
+ /// Number of candidates to randomly sample.
+ /// 
+ /// 
+ /// Optional argument
+ /// If unique is true, we sample with rejection, so that all sampled
+ /// candidates in a batch are unique. This requires some approximation to
+ /// estimate the post-rejection sampling probabilities.
+ /// 
+ /// 
+ /// Optional argument
+ /// The sampler will sample integers from the interval [0, range_max).
+ /// 
+ /// 
+ /// If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// 
+ /// 
+ /// A second seed to avoid seed collision.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// sampled_candidates : A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// true_expected_count : A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// sampled_expected_count : A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// See explanations of candidate sampling and the data formats at
+ /// go/candidate-sampling.
+ /// 
+ /// For each batch, this op picks a single set of sampled candidate labels.
+ /// 
+ /// The advantages of sampling candidates per-batch are simplicity and the
+ /// possibility of efficient dense matrix multiplication. The disadvantage is that
+ /// the sampled candidates must be chosen independently of the context and of the
+ /// true labels.
+ /// 
+ public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) thread_unsafe_unigram_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int range_max, int? seed = null, int? seed2 = null, string name = "ThreadUnsafeUnigramCandidateSampler")
+ {
+ var dict = new Dictionary();
+ dict["true_classes"] = true_classes;
+ dict["num_true"] = num_true;
+ dict["num_sampled"] = num_sampled;
+ dict["unique"] = unique;
+ dict["range_max"] = range_max;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("ThreadUnsafeUnigramCandidateSampler", name: name, keywords: dict);
+ int _idx = 0;
+ var sampled_candidates = op.outputs[_idx++];
+ var true_expected_count = op.outputs[_idx++];
+ var sampled_expected_count = op.outputs[_idx++];
+ return (sampled_candidates, true_expected_count, sampled_expected_count);
+ }
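+ 
+ // Illustrative usage sketch (not part of the generated bindings): assuming
+ // `true_classes` is an int64 Tensor of shape [batch_size, 1] built elsewhere,
+ // sampling 5 distinct candidate IDs from [0, 100) might look like:
+ //
+ //   var (sampled, true_exp, sampled_exp) =
+ //       gen_ops.thread_unsafe_unigram_candidate_sampler(
+ //           true_classes, num_true: 1, num_sampled: 5, unique: true, range_max: 100);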
+ 
+ /// 
+ /// Constructs a tensor by tiling a given tensor.
+ /// 
+ /// 
+ /// 1-D or higher.
+ /// 
+ /// 
+ /// 1-D. Length must be the same as the number of dimensions in input
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tile'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This operation creates a new tensor by replicating input multiples times.
+ /// The output tensor's i'th dimension has input.dims(i) * multiples[i] elements,
+ /// and the values of input are replicated multiples[i] times along the 'i'th
+ /// dimension. For example, tiling [a b c d] by [2] produces
+ /// [a b c d a b c d].
+ /// 
+ public static Tensor tile (Tensor input, Tensor multiples, string name = "Tile")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["multiples"] = multiples;
+ var op = _op_def_lib._apply_op_helper("Tile", name: name, keywords: dict);
+ return op.output;
+ }
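+ 
+ // Illustrative usage sketch (not part of the generated bindings): with `x`
+ // holding [a, b, c, d] and `multiples` holding [2], both built elsewhere, the
+ // result evaluates to [a, b, c, d, a, b, c, d]:
+ //
+ //   var tiled = gen_ops.tile(x, multiples);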
+ 
+ /// 
+ /// Returns the gradient of Tile.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TileGrad'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Since Tile takes an input and repeats the input multiples times
+ /// along each dimension, TileGrad takes in multiples and aggregates
+ /// each repeated tile of input into output.
+ /// 
+ public static Tensor tile_grad (Tensor input, Tensor multiples, string name = "TileGrad")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["multiples"] = multiples;
+ var op = _op_def_lib._apply_op_helper("TileGrad", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Provides the time since epoch in seconds.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Timestamp'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Returns the timestamp as a float64 for seconds since the Unix epoch.
+ /// 
+ /// Note: the timestamp is computed when the op is executed, not when it is added
+ /// to the graph.
+ /// 
+ public static Tensor timestamp (string name = "Timestamp")
+ {
+ var dict = new Dictionary();
+ var op = _op_def_lib._apply_op_helper("Timestamp", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Finds values and indices of the k largest elements for the last dimension.
+ /// 
+ /// 
+ /// 1-D or higher with last dimension at least k.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TopK'.
+ /// 
+ /// 
+ /// Optional argument
+ /// Number of top elements to look for along the last dimension (along each
+ /// row for matrices).
+ /// 
+ /// 
+ /// If true the resulting k elements will be sorted by the values in
+ /// descending order.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// values : The k largest elements along each last dimensional slice.
+ /// indices : The indices of values within the last dimension of input.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// If the input is a vector (rank-1), finds the k largest entries in the vector
+ /// and outputs their values and indices as vectors. Thus values[j] is the
+ /// j-th largest entry in input, and its index is indices[j].
+ /// 
+ /// For matrices (resp. higher rank input), computes the top k entries in each
+ /// row (resp. vector along the last dimension). Thus,
+ /// 
+ /// values.shape = indices.shape = input.shape[:-1] + [k]
+ /// 
+ /// If two elements are equal, the lower-index element appears first.
+ /// 
+ /// If k varies dynamically, use TopKV2 below.
+ /// 
+ public static (Tensor values, Tensor indices) top_k (Tensor input, int k, bool? sorted = null, string name = "TopK")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["k"] = k;
+ if (sorted.HasValue)
+ dict["sorted"] = sorted.Value;
+ var op = _op_def_lib._apply_op_helper("TopK", name: name, keywords: dict);
+ int _idx = 0;
+ var values = op.outputs[_idx++];
+ var indices = op.outputs[_idx++];
+ return (values, indices);
+ }
+ 
+ /// 
+ /// Finds values and indices of the k largest elements for the last dimension.
+ /// 
+ /// 
+ /// 1-D or higher with last dimension at least k.
+ /// 
+ /// 
+ /// 0-D. Number of top elements to look for along the last dimension (along each
+ /// row for matrices).
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TopKV2'.
+ /// 
+ /// 
+ /// If true the resulting k elements will be sorted by the values in
+ /// descending order.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// values : The k largest elements along each last dimensional slice.
+ /// indices : The indices of values within the last dimension of input.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// If the input is a vector (rank-1), finds the k largest entries in the vector
+ /// and outputs their values and indices as vectors. Thus values[j] is the
+ /// j-th largest entry in input, and its index is indices[j].
+ /// 
+ /// For matrices (resp. higher rank input), computes the top k entries in each
+ /// row (resp. vector along the last dimension). Thus,
+ /// 
+ /// values.shape = indices.shape = input.shape[:-1] + [k]
+ /// 
+ /// If two elements are equal, the lower-index element appears first.
+ /// 
+ public static (Tensor values, Tensor indices) top_k_v2 (Tensor input, Tensor k, bool? sorted = null, string name = "TopKV2")
+ {
+ var dict = new Dictionary();
+ dict["input"] = input;
+ dict["k"] = k;
+ if (sorted.HasValue)
+ dict["sorted"] = sorted.Value;
+ var op = _op_def_lib._apply_op_helper("TopKV2", name: name, keywords: dict);
+ int _idx = 0;
+ var values = op.outputs[_idx++];
+ var indices = op.outputs[_idx++];
+ return (values, indices);
+ }
+ 
+ /// 
+ /// Shuffle dimensions of x according to a permutation.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Transpose'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// The output y has the same rank as x. The shapes of x and y satisfy:
+ /// y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]
+ /// 
+ public static Tensor transpose (Tensor x, Tensor perm, string name = "Transpose")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["perm"] = perm;
+ var op = _op_def_lib._apply_op_helper("Transpose", name: name, keywords: dict);
+ return op.output;
+ }
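+ 
+ // Illustrative usage sketch (not part of the generated bindings): for a 2-D
+ // tensor `x` and a perm tensor holding [1, 0], both built elsewhere, the result
+ // swaps the two axes, so y.shape[i] == x.shape[perm[i]]:
+ //
+ //   var y = gen_ops.transpose(x, perm);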
+ 
+ /// 
+ /// Returns x / y element-wise for integer types.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncateDiv'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Truncation designates that negative numbers will round fractional quantities
+ /// toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
+ /// from Python semantics. See FloorDiv for a division function that matches
+ /// Python semantics.
+ /// 
+ /// *NOTE*: TruncateDiv supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ /// 
+ public static Tensor truncate_div (Tensor x, Tensor y, string name = "TruncateDiv")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("TruncateDiv", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Returns element-wise remainder of division.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncateMod'.
+ /// 
+ /// 
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// This emulates C semantics in that the result here is consistent with a
+ /// truncating divide. E.g. truncate(x / y) * y + truncate_mod(x, y) = x.
+ /// 
+ /// *NOTE*: TruncateMod supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ /// 
+ public static Tensor truncate_mod (Tensor x, Tensor y, string name = "TruncateMod")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["y"] = y;
+ var op = _op_def_lib._apply_op_helper("TruncateMod", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Outputs random values from a truncated normal distribution.
+ /// 
+ /// 
+ /// The shape of the output tensor.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncatedNormal'.
+ /// 
+ /// 
+ /// Optional argument
+ /// The type of the output.
+ /// 
+ /// 
+ /// If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// 
+ /// 
+ /// A second seed to avoid seed collision.
+ /// 
+ /// 
+ /// A tensor of the specified shape filled with random truncated normal
+ /// values.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// The generated values follow a normal distribution with mean 0 and standard
+ /// deviation 1, except that values whose magnitude is more than 2 standard
+ /// deviations from the mean are dropped and re-picked.
+ /// 
+ public static Tensor truncated_normal (Tensor shape, TF_DataType dtype, int? seed = null, int? seed2 = null, string name = "TruncatedNormal")
+ {
+ var dict = new Dictionary();
+ dict["shape"] = shape;
+ dict["dtype"] = dtype;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("TruncatedNormal", name: name, keywords: dict);
+ return op.output;
+ }
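+ 
+ // Illustrative usage sketch (not part of the generated bindings): drawing a
+ // [2, 3] matrix of truncated-normal samples, where `shape` is an int32 Tensor
+ // holding [2, 3] built elsewhere, with a fixed seed for reproducibility
+ // (TF_DataType.TF_FLOAT is assumed to name the float32 element type):
+ //
+ //   var samples = gen_ops.truncated_normal(shape, TF_DataType.TF_FLOAT, seed: 42);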
+ 
+ /// 
+ /// Perform batches of RPC requests.
+ /// 
+ /// 
+ /// 0-D or 1-D. The address (i.e. host_name:port) of the RPC server.
+ /// If this tensor has more than 1 element, then multiple parallel rpc requests
+ /// are sent. This argument broadcasts with method and request.
+ /// 
+ /// 
+ /// 0-D or 1-D. The method address on the RPC server.
+ /// If this tensor has more than 1 element, then multiple parallel rpc requests
+ /// are sent. This argument broadcasts with address and request.
+ /// 
+ /// 
+ /// 0-D or 1-D. Serialized proto strings: the rpc request argument.
+ /// If this tensor has more than 1 element, then multiple parallel rpc requests
+ /// are sent. This argument broadcasts with address and method.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'TryRpc'.
+ /// 
+ /// 
+ /// RPC protocol to use. Empty string means use the default protocol.
+ /// Options include 'grpc'.
+ /// 
+ /// 
+ /// boolean. If true (default), then failures to connect
+ /// (i.e., the server does not immediately respond) cause an RPC failure.
+ /// 
+ /// 
+ /// int. If 0 (default), then the kernel will run the RPC
+ /// request and only time out if the RPC deadline passes or the session times out.
+ /// If this value is greater than 0, then the op will raise an exception if
+ /// the RPC takes longer than timeout_in_ms.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// response : Same shape as request. Serialized proto strings: the rpc responses.
+ /// status_code : Same shape as request. Values correspond to tensorflow Status enum codes.
+ /// status_message : Same shape as request. Values correspond to Status messages
+ /// returned from the RPC calls.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This op asynchronously performs either a single RPC request, or a batch
+ /// of requests. RPC requests are defined by three main parameters:
+ /// 
+ /// - address (the host+port or BNS address of the request)
+ /// - method (the method name for the request)
+ /// - request (the serialized proto string, or vector of strings,
+ /// of the RPC request argument).
+ /// 
+ /// For example, if you have an RPC service running on port localhost:2345,
+ /// and its interface is configured with the following proto declaration:
+ /// 
+ /// 
+ /// service MyService {
+ /// rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
+ /// }
+ /// };
+ /// 
+ /// 
+ /// then call this op with arguments:
+ /// 
+ /// 
+ /// address = "localhost:2345"
+ /// method = "MyService/MyMethod"
+ /// 
+ /// 
+ /// The request tensor is a string tensor representing serialized MyRequestProto
+ /// strings; and the output string tensor response will have the same shape
+ /// and contain (upon successful completion) corresponding serialized
+ /// MyResponseProto strings.
+ /// 
+ /// For example, to send a single, empty, MyRequestProto, call
+ /// this op with request = "". To send 5 **parallel** empty requests,
+ /// call this op with request = ["", "", "", "", ""].
+ /// 
+ /// More generally, one can create a batch of MyRequestProto serialized protos
+ /// from regular batched tensors using the encode_proto op, and convert
+ /// the response MyResponseProto serialized protos to batched tensors
+ /// using the decode_proto op.
+ /// 
+ /// **NOTE** Working with serialized proto strings is faster than instantiating
+ /// actual proto objects in memory, so no performance degradation is expected
+ /// compared to writing custom kernels for this workflow.
+ /// + /// Unlike the standard Rpc op, if the connection fails or the remote worker + /// returns an error status, this op does **not** reraise the exception. + /// Instead, the status_code and status_message entry for the corresponding RPC + /// call is set with the error returned from the RPC call. The response tensor + /// will contain valid response values for those minibatch entries whose RPCs did + /// not fail; the rest of the entries will have empty strings. + /// + public static (Tensor response, Tensor status_code, Tensor status_message) try_rpc (Tensor address, Tensor method, Tensor request, string protocol = null, bool? fail_fast = null, int? timeout_in_ms = null, string name = "TryRpc") + { + var dict = new Dictionary(); + dict["address"] = address; + dict["method"] = method; + dict["request"] = request; + if (protocol != null) + dict["protocol"] = protocol; + if (fail_fast.HasValue) + dict["fail_fast"] = fail_fast.Value; + if (timeout_in_ms.HasValue) + dict["timeout_in_ms"] = timeout_in_ms.Value; + var op = _op_def_lib._apply_op_helper("TryRpc", name: name, keywords: dict); + int _idx = 0; + var response = op.outputs[_idx++]; + var status_code = op.outputs[_idx++]; + var status_message = op.outputs[_idx++]; + return (response, status_code, status_message); + } + + /// + /// Reverses the operation of Batch for a single output Tensor. + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unbatch'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// An instance of Unbatch either receives an empty batched_tensor, in which case it + /// asynchronously waits until the values become available from a concurrently + /// running instance of Unbatch with the same container and shared_name, or receives + /// a non-empty batched_tensor in which case it finalizes all other concurrently + /// running instances and outputs its own element from the batch. + /// + /// batched_tensor: The possibly transformed output of Batch. The size of the first + /// dimension should remain unchanged by the transformations for the operation to + /// work. + /// batch_index: The matching batch_index obtained from Batch. + /// id: The id scalar emitted by Batch. + /// unbatched_tensor: The Tensor corresponding to this execution. + /// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + /// batched input tensor associated with a given invocation of the op. + /// container: Container to control resource sharing. + /// shared_name: Instances of Unbatch with the same container and shared_name are + /// assumed to possibly belong to the same batch. If left empty, the op name will + /// be used as the shared name. 
+ /// + public static Tensor unbatch (Tensor batched_tensor, Tensor batch_index, Tensor id, int timeout_micros, string container = null, string shared_name = null, string name = "Unbatch") + { + var dict = new Dictionary(); + dict["batched_tensor"] = batched_tensor; + dict["batch_index"] = batch_index; + dict["id"] = id; + dict["timeout_micros"] = timeout_micros; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("Unbatch", name: name, keywords: dict); + return op.output; + } + + /// + /// A dataset that splits the elements of its input into multiple elements. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnbatchDataset'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor unbatch_dataset (Tensor input_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "UnbatchDataset") + { + var dict = new Dictionary(); + dict["input_dataset"] = input_dataset; + dict["output_types"] = output_types; + dict["output_shapes"] = output_shapes; + var op = _op_def_lib._apply_op_helper("UnbatchDataset", name: name, keywords: dict); + return op.output; + } + + /// + /// Gradient of Unbatch. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnbatchGrad'. + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Acts like Batch but using the given batch_index index of batching things as they + /// become available. This ensures that the gradients are propagated back in the + /// same session which did the forward pass. + /// + /// original_input: The input to the Unbatch operation this is the gradient of. + /// batch_index: The batch_index given to the Unbatch operation this is the gradient + /// of. + /// grad: The downstream gradient. + /// id: The id scalar emitted by Batch. + /// batched_grad: The return value, either an empty tensor or the batched gradient. + /// container: Container to control resource sharing. + /// shared_name: Instances of UnbatchGrad with the same container and shared_name + /// are assumed to possibly belong to the same batch. If left empty, the op name + /// will be used as the shared name. + /// + public static Tensor unbatch_grad (Tensor original_input, Tensor batch_index, Tensor grad, Tensor id, string container = null, string shared_name = null, string name = "UnbatchGrad") + { + var dict = new Dictionary(); + dict["original_input"] = original_input; + dict["batch_index"] = batch_index; + dict["grad"] = grad; + dict["id"] = id; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("UnbatchGrad", name: name, keywords: dict); + return op.output; + } + + /// + /// Generates labels for candidate sampling with a uniform distribution. + /// + /// + /// A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniformCandidateSampler'.
+ /// 
+ /// 
+ /// Optional argument
+ /// Number of true labels per context.
+ /// 
+ /// 
+ /// Optional argument
+ /// Number of candidates to randomly sample.
+ /// 
+ /// 
+ /// Optional argument
+ /// If unique is true, we sample with rejection, so that all sampled
+ /// candidates in a batch are unique. This requires some approximation to
+ /// estimate the post-rejection sampling probabilities.
+ /// 
+ /// 
+ /// Optional argument
+ /// The sampler will sample integers from the interval [0, range_max).
+ /// 
+ /// 
+ /// If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// 
+ /// 
+ /// A second seed to avoid seed collision.
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// sampled_candidates : A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// true_expected_count : A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// sampled_expected_count : A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// See explanations of candidate sampling and the data formats at
+ /// go/candidate-sampling.
+ /// 
+ /// For each batch, this op picks a single set of sampled candidate labels.
+ /// 
+ /// The advantages of sampling candidates per-batch are simplicity and the
+ /// possibility of efficient dense matrix multiplication. The disadvantage is that
+ /// the sampled candidates must be chosen independently of the context and of the
+ /// true labels.
+ /// 
+ public static (Tensor sampled_candidates, Tensor true_expected_count, Tensor sampled_expected_count) uniform_candidate_sampler (Tensor true_classes, int num_true, int num_sampled, bool unique, int range_max, int? seed = null, int? seed2 = null, string name = "UniformCandidateSampler")
+ {
+ var dict = new Dictionary();
+ dict["true_classes"] = true_classes;
+ dict["num_true"] = num_true;
+ dict["num_sampled"] = num_sampled;
+ dict["unique"] = unique;
+ dict["range_max"] = range_max;
+ if (seed.HasValue)
+ dict["seed"] = seed.Value;
+ if (seed2.HasValue)
+ dict["seed2"] = seed2.Value;
+ var op = _op_def_lib._apply_op_helper("UniformCandidateSampler", name: name, keywords: dict);
+ int _idx = 0;
+ var sampled_candidates = op.outputs[_idx++];
+ var true_expected_count = op.outputs[_idx++];
+ var sampled_expected_count = op.outputs[_idx++];
+ return (sampled_candidates, true_expected_count, sampled_expected_count);
+ }
+ 
+ /// 
+ /// Finds unique elements in a 1-D tensor.
+ /// 
+ /// 
+ /// 1-D.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unique'.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// y : 1-D.
+ /// idx : 1-D.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This operation returns a tensor y containing all of the unique elements of x
+ /// sorted in the same order that they occur in x. This operation also returns a
+ /// tensor idx the same size as x that contains the index of each value of x
+ /// in the unique output y. In other words:
+ /// 
+ /// y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
+ /// 
+ /// For example:
+ /// 
+ /// 
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx = unique(x)
+ /// y ==&gt; [1, 2, 4, 7, 8]
+ /// idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// 
+ /// 
+ public static (Tensor y, Tensor idx) unique (Tensor x, TF_DataType? out_idx = null, string name = "Unique")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ if (out_idx.HasValue)
+ dict["out_idx"] = out_idx.Value;
+ var op = _op_def_lib._apply_op_helper("Unique", name: name, keywords: dict);
+ int _idx = 0;
+ var y = op.outputs[_idx++];
+ var idx = op.outputs[_idx++];
+ return (y, idx);
+ }
+ 
+ /// 
+ /// Finds unique elements along an axis of a tensor.
+ /// 
+ /// 
+ /// A Tensor.
+ /// 
+ /// 
+ /// A Tensor of type int32 (default: None). The axis of the Tensor to
+ /// find the unique elements.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniqueV2'.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// y : A Tensor. Unique elements along the axis of Tensor x.
+ /// idx : A 1-D Tensor. Has the same type as x that contains the index of each
+ /// value of x in the output y.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This operation returns a tensor y containing unique elements
+ /// along the axis of a tensor. The returned unique elements are sorted
+ /// in the same order as they occur along axis in x.
+ /// This operation also returns a tensor idx that is the same size as
+ /// the number of the elements in x along the axis dimension. It
+ /// contains the index in the unique output y.
+ /// In other words, for a 1-D tensor x with axis = None:
+ /// 
+ /// y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
+ /// 
+ /// For example:
+ /// 
+ /// 
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx = unique(x)
+ /// y ==&gt; [1, 2, 4, 7, 8]
+ /// idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// 
+ /// 
+ /// For a 2-D tensor x with axis = 0:
+ /// 
+ /// 
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx = unique(x, axis=0)
+ /// y ==&gt; [[1, 0, 0],
+ /// [2, 0, 0]]
+ /// idx ==&gt; [0, 0, 1]
+ /// 
+ /// 
+ /// For a 2-D tensor x with axis = 1:
+ /// 
+ /// 
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx = unique(x, axis=1)
+ /// y ==&gt; [[1, 0],
+ /// [1, 0],
+ /// [2, 0]]
+ /// idx ==&gt; [0, 1, 1]
+ /// 
+ /// 
+ public static (Tensor y, Tensor idx) unique_v2 (Tensor x, Tensor axis, TF_DataType? out_idx = null, string name = "UniqueV2")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["axis"] = axis;
+ if (out_idx.HasValue)
+ dict["out_idx"] = out_idx.Value;
+ var op = _op_def_lib._apply_op_helper("UniqueV2", name: name, keywords: dict);
+ int _idx = 0;
+ var y = op.outputs[_idx++];
+ var idx = op.outputs[_idx++];
+ return (y, idx);
+ }
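+ 
+ // Illustrative usage sketch (not part of the generated bindings): mirroring
+ // the example above, for a 1-D int tensor `x` holding [1, 1, 2, 4, 4, 4, 7, 8, 8],
+ // built elsewhere:
+ //
+ //   var (y, idx) = gen_ops.unique(x);
+ //   // y evaluates to [1, 2, 4, 7, 8] and idx to [0, 0, 1, 2, 2, 2, 3, 4, 4].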
+ 
+ /// 
+ /// Finds unique elements in a 1-D tensor.
+ /// 
+ /// 
+ /// 1-D.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniqueWithCounts'.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// y : 1-D.
+ /// idx : 1-D.
+ /// count : 1-D.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This operation returns a tensor y containing all of the unique elements of x
+ /// sorted in the same order that they occur in x. This operation also returns a
+ /// tensor idx the same size as x that contains the index of each value of x
+ /// in the unique output y. Finally, it returns a third tensor count that
+ /// contains the count of each element of y in x. In other words:
+ /// 
+ /// y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
+ /// 
+ /// For example:
+ /// 
+ /// 
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx, count = unique_with_counts(x)
+ /// y ==&gt; [1, 2, 4, 7, 8]
+ /// idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// count ==&gt; [2, 1, 3, 1, 2]
+ /// 
+ /// 
+ public static (Tensor y, Tensor idx, Tensor count) unique_with_counts (Tensor x, TF_DataType? out_idx = null, string name = "UniqueWithCounts")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ if (out_idx.HasValue)
+ dict["out_idx"] = out_idx.Value;
+ var op = _op_def_lib._apply_op_helper("UniqueWithCounts", name: name, keywords: dict);
+ int _idx = 0;
+ var y = op.outputs[_idx++];
+ var idx = op.outputs[_idx++];
+ var count = op.outputs[_idx++];
+ return (y, idx, count);
+ }
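+ 
+ // Illustrative usage sketch (not part of the generated bindings): extending
+ // the `unique` example, with `x` again holding [1, 1, 2, 4, 4, 4, 7, 8, 8]:
+ //
+ //   var (y, idx, count) = gen_ops.unique_with_counts(x);
+ //   // count evaluates to [2, 1, 3, 1, 2], one entry per unique value in y.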
+ 
+ /// 
+ /// Finds unique elements along an axis of a tensor.
+ /// 
+ /// 
+ /// A Tensor.
+ /// 
+ /// 
+ /// A Tensor of type int32 (default: None). The axis of the Tensor to
+ /// find the unique elements.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniqueWithCountsV2'.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// Returns a tuple with multiple values, as follows:
+ /// y : A Tensor. Unique elements along the axis of Tensor x.
+ /// idx : A 1-D Tensor. Has the same type as x that contains the index of each
+ /// value of x in the output y.
+ /// count : A 1-D Tensor. The count of each value of x in the output y.
+ /// The Operation can be fetched from any of the Tensors returned in the tuple values, by fetching the Operation property.
+ /// 
+ /// 
+ /// This operation returns a tensor y containing unique elements
+ /// along the axis of a tensor. The returned unique elements are sorted
+ /// in the same order as they occur along axis in x.
+ /// This operation also returns a tensor idx and a tensor count
+ /// that are the same size as the number of the elements in x along the
+ /// axis dimension. The idx contains the index in the unique output y
+ /// and the count contains the count in the unique output y.
+ /// In other words, for a 1-D tensor x with axis = None:
+ /// 
+ /// y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
+ /// 
+ /// For example:
+ /// 
+ /// 
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx, count = unique_with_counts(x)
+ /// y ==&gt; [1, 2, 4, 7, 8]
+ /// idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// count ==&gt; [2, 1, 3, 1, 2]
+ /// 
+ /// 
+ /// For a 2-D tensor x with axis = 0:
+ /// 
+ /// 
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx, count = unique_with_counts(x, axis=0)
+ /// y ==&gt; [[1, 0, 0],
+ /// [2, 0, 0]]
+ /// idx ==&gt; [0, 0, 1]
+ /// count ==&gt; [2, 1]
+ /// 
+ /// 
+ /// For a 2-D tensor x with axis = 1:
+ /// 
+ /// 
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx, count = unique_with_counts(x, axis=1)
+ /// y ==&gt; [[1, 0],
+ /// [1, 0],
+ /// [2, 0]]
+ /// idx ==&gt; [0, 1, 1]
+ /// count ==&gt; [1, 2]
+ /// 
+ /// 
+ public static (Tensor y, Tensor idx, Tensor count) unique_with_counts_v2 (Tensor x, Tensor axis, TF_DataType? out_idx = null, string name = "UniqueWithCountsV2")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["axis"] = axis;
+ if (out_idx.HasValue)
+ dict["out_idx"] = out_idx.Value;
+ var op = _op_def_lib._apply_op_helper("UniqueWithCountsV2", name: name, keywords: dict);
+ int _idx = 0;
+ var y = op.outputs[_idx++];
+ var idx = op.outputs[_idx++];
+ var count = op.outputs[_idx++];
+ return (y, idx, count);
+ }
+ 
+ /// 
+ /// Unpacks a given dimension of a rank-R tensor into num rank-(R-1) tensors.
+ /// 
+ /// 
+ /// 1-D or higher, with axis dimension size equal to num.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unpack'.
+ /// 
+ /// 
+ /// Optional argument
+ /// 
+ /// 
+ /// Dimension along which to unpack. Negative values wrap around, so the
+ /// valid range is [-R, R).
+ /// 
+ /// 
+ /// The list of tensors unpacked from value.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Unpacks num tensors from value by chipping it along the axis dimension.
+ /// For example, given a tensor of shape (A, B, C, D):
+ /// 
+ /// If axis == 0 then the i'th tensor in output is the slice value[i, :, :, :]
+ /// and each tensor in output will have shape (B, C, D). (Note that the
+ /// dimension unpacked along is gone, unlike split).
+ /// 
+ /// If axis == 1 then the i'th tensor in output is the slice value[:, i, :, :]
+ /// and each tensor in output will have shape (A, C, D).
+ /// Etc.
+ /// 
+ /// This is the opposite of pack.
+ /// 
+ public static Tensor[] unpack (Tensor value, int num, int? axis = null, string name = "Unpack")
+ {
+ var dict = new Dictionary();
+ dict["value"] = value;
+ dict["num"] = num;
+ if (axis.HasValue)
+ dict["axis"] = axis.Value;
+ var op = _op_def_lib._apply_op_helper("Unpack", name: name, keywords: dict);
+ int _idx = 0;
+ var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray();
+ return (output);
+ }
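+ 
+ // Illustrative usage sketch (not part of the generated bindings): unpacking a
+ // tensor `value` of shape [3, 4], built elsewhere, along the default axis 0
+ // yields three tensors of shape [4]:
+ //
+ //   Tensor[] parts = gen_ops.unpack(value, num: 3);
+ //   // parts[i] corresponds to the slice value[i, :].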
+ 
+ /// 
+ /// Converts a flat index or array of flat indices into a tuple of coordinate arrays.
+ /// 
+ /// 
+ /// A 0-D or 1-D int Tensor whose elements are indices into the
+ /// flattened version of an array of dimensions dims.
+ /// 
+ /// 
+ /// A 1-D int Tensor. The shape of the array to use for unraveling
+ /// indices.
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnravelIndex'.
+ /// 
+ /// 
+ /// A 2-D (or 1-D if indices is 0-D) tensor where each row has the
+ /// same shape as the indices array.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// @compatibility(numpy)
+ /// Equivalent to np.unravel_index
+ /// @end_compatibility
+ /// 
+ public static Tensor unravel_index (Tensor indices, Tensor dims, string name = "UnravelIndex")
+ {
+ var dict = new Dictionary();
+ dict["indices"] = indices;
+ dict["dims"] = dims;
+ var op = _op_def_lib._apply_op_helper("UnravelIndex", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes the maximum along segments of a tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// A tensor whose shape is a prefix of data.shape.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentMax'.
+ /// 
+ /// 
+ /// Has same shape as data, except for the first segment_ids.rank
+ /// dimensions, which are replaced with a single dimension which has size
+ /// num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
+ /// for an explanation of segments.
+ /// 
+ /// This operator is similar to the unsorted segment sum operator found
+ /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+ /// Instead of computing the sum over segments, it computes the maximum such that:
+ /// 
+ /// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples j... such
+ /// that segment_ids[j...] == i.
+ /// 
+ /// If the maximum is empty for a given segment ID i, it outputs the smallest
+ /// possible value for the specific numeric type,
+ /// output[i] = numeric_limits&lt;T&gt;::lowest().
+ /// 
+ /// If the given segment ID i is negative, then the corresponding value is
+ /// dropped, and will not be included in the result.
+ /// 
+ /// &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;
+ /// &lt;img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt&gt;
+ /// &lt;/div&gt;
+ /// 
+ public static Tensor unsorted_segment_max (Tensor data, Tensor segment_ids, Tensor num_segments, string name = "UnsortedSegmentMax")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("UnsortedSegmentMax", name: name, keywords: dict);
+ return op.output;
+ }
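+ 
+ // Illustrative usage sketch (not part of the generated bindings): with `data`
+ // holding [1, 3, 2, 5], `segment_ids` holding [0, 0, 1, 1] and `num_segments`
+ // holding 2, all built elsewhere, the result evaluates to [3, 5], the maximum
+ // within each segment:
+ //
+ //   var maxes = gen_ops.unsorted_segment_max(data, segment_ids, num_segments);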
+ 
+ /// 
+ /// Computes the minimum along segments of a tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// A tensor whose shape is a prefix of data.shape.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentMin'.
+ /// 
+ /// 
+ /// Has same shape as data, except for the first segment_ids.rank
+ /// dimensions, which are replaced with a single dimension which has size
+ /// num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
+ /// for an explanation of segments.
+ /// 
+ /// This operator is similar to the unsorted segment sum operator found
+ /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+ /// Instead of computing the sum over segments, it computes the minimum such that:
+ /// 
+ /// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples j... such
+ /// that segment_ids[j...] == i.
+ /// 
+ /// If the minimum is empty for a given segment ID i, it outputs the largest
+ /// possible value for the specific numeric type,
+ /// output[i] = numeric_limits&lt;T&gt;::max().
+ /// 
+ /// If the given segment ID i is negative, then the corresponding value is
+ /// dropped, and will not be included in the result.
+ /// 
+ public static Tensor unsorted_segment_min (Tensor data, Tensor segment_ids, Tensor num_segments, string name = "UnsortedSegmentMin")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("UnsortedSegmentMin", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes the product along segments of a tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// A tensor whose shape is a prefix of data.shape.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentProd'.
+ /// 
+ /// 
+ /// Has same shape as data, except for the first segment_ids.rank
+ /// dimensions, which are replaced with a single dimension which has size
+ /// num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// 
+ /// 
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
+ /// for an explanation of segments.
+ /// 
+ /// This operator is similar to the unsorted segment sum operator found
+ /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+ /// Instead of computing the sum over segments, it computes the product of all
+ /// entries belonging to a segment such that:
+ /// 
+ /// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
+ /// j... such that segment_ids[j...] == i.
+ /// 
+ /// If there is no entry for a given segment ID i, it outputs 1.
+ /// 
+ /// If the given segment ID i is negative, then the corresponding value is
+ /// dropped, and will not be included in the result.
+ /// 
+ public static Tensor unsorted_segment_prod (Tensor data, Tensor segment_ids, Tensor num_segments, string name = "UnsortedSegmentProd")
+ {
+ var dict = new Dictionary();
+ dict["data"] = data;
+ dict["segment_ids"] = segment_ids;
+ dict["num_segments"] = num_segments;
+ var op = _op_def_lib._apply_op_helper("UnsortedSegmentProd", name: name, keywords: dict);
+ return op.output;
+ }
+ 
+ /// 
+ /// Computes the sum along segments of a tensor.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// A tensor whose shape is a prefix of data.shape.
+ /// 
+ /// 
+ /// 
+ /// 
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentSum'.
+ /// 
+ /// 
+ /// Has same shape as data, except for the first segment_ids.rank
+ /// dimensions, which are replaced with a single dimension which has size
+ /// num_segments.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples j... such + /// that segment_ids[j...] == i. Unlike SegmentSum, segment_ids + /// need not be sorted and need not cover all values in the full + /// range of valid values. + /// + /// If the sum is empty for a given segment ID i, output[i] = 0. + /// If the given segment ID i is negative, the value is dropped and will not be + /// added to the sum of the segment. + /// + /// num_segments should equal the number of distinct segment IDs. + /// + /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> + /// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt> + /// </div> + /// + public static Tensor unsorted_segment_sum (Tensor data, Tensor segment_ids, Tensor num_segments, string name = "UnsortedSegmentSum") + { + var dict = new Dictionary(); + dict["data"] = data; + dict["segment_ids"] = segment_ids; + dict["num_segments"] = num_segments; + var op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name: name, keywords: dict); + return op.output; + } + + /// + /// Op is similar to a lightweight Dequeue. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unstage'. + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// The basic functionality is similar to dequeue with many fewer + /// capabilities and options. This Op is optimized for performance. + /// + public static Tensor[] unstage (TF_DataType[] dtypes, int? capacity = null, int? memory_limit = null, string container = null, string shared_name = null, string name = "Unstage") + { + var dict = new Dictionary(); + dict["dtypes"] = dtypes; + if (capacity.HasValue) + dict["capacity"] = capacity.Value; + if (memory_limit.HasValue) + dict["memory_limit"] = memory_limit.Value; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("Unstage", name: name, keywords: dict); + int _idx = 0; + var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray(); + return (values); + } + + /// + /// Creates a handle to a Variable resource. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'VarHandleOp'. + /// + /// + /// Optional argument + /// the type of this variable. Must agree with the dtypes + /// of all ops using this variable. + /// + /// + /// Optional argument + /// The (possibly partially specified) shape of this variable. + /// + /// + /// the container this variable is placed in. + /// + /// + /// the name by which this variable is referred to. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
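+ ///
+ ///
+ /// A hypothetical usage sketch (the dtype, shape, and shared_name below are
+ /// illustrative, not part of the generated op definition):
+ ///
+ /// var handle = gen_ops.var_handle_op(TF_DataType.TF_FLOAT,
+ /// new TensorShape(2, 2), shared_name: "my_var");
+ /// var is_init = gen_ops.var_is_initialized_op(handle);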
+ /// + public static Tensor var_handle_op (TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "VarHandleOp") + { + var dict = new Dictionary(); + dict["dtype"] = dtype; + dict["shape"] = shape; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("VarHandleOp", name: name, keywords: dict); + return op.output; + } + + /// + /// Checks whether a resource handle-based variable has been initialized. + /// + /// + /// the input resource handle. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'VarIsInitializedOp'. + /// + /// + /// a scalar boolean which is true if the variable has been + /// initialized. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor var_is_initialized_op (Tensor resource, string name = "VarIsInitializedOp") + { + var dict = new Dictionary(); + dict["resource"] = resource; + var op = _op_def_lib._apply_op_helper("VarIsInitializedOp", name: name, keywords: dict); + return op.output; + } + + /// + /// Use VariableV2 instead. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Variable'. + /// + /// + /// Optional argument + /// + /// + /// Optional argument + /// + /// + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + public static Tensor variable (TensorShape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "Variable") + { + var dict = new Dictionary(); + dict["shape"] = shape; + dict["dtype"] = dtype; + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("Variable", name: name, keywords: dict); + return op.output; + } + + /// + /// Returns the shape of the variable pointed to by resource. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'VariableShape'. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// This operation returns a 1-D integer tensor representing the shape of input. + /// + /// For example: + /// + /// + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// + /// + public static Tensor variable_shape (Tensor input, TF_DataType? out_type = null, string name = "VariableShape") + { + var dict = new Dictionary(); + dict["input"] = input; + if (out_type.HasValue) + dict["out_type"] = out_type.Value; + var op = _op_def_lib._apply_op_helper("VariableShape", name: name, keywords: dict); + return op.output; + } + + /// + /// Holds state in the form of a tensor that persists across steps. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'VariableV2'. + /// + /// + /// Optional argument + /// The shape of the variable tensor. + /// + /// + /// Optional argument + /// The type of elements in the variable tensor. + /// + /// + /// If non-empty, this variable is placed in the given container. + /// Otherwise, a default container is used. 
+ ///
+ ///
+ /// If non-empty, this variable is named in the given bucket
+ /// with this shared_name. Otherwise, the node name is used instead.
+ ///
+ ///
+ /// A reference to the variable tensor.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Outputs a ref to the tensor state so it may be read or modified.
+ /// TODO(zhifengc/mrry): Add a pointer to a more detailed document
+ /// about sharing states in tensorflow.
+ ///
+ public static Tensor variable_v2 (TensorShape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "VariableV2")
+ {
+ var dict = new Dictionary();
+ dict["shape"] = shape;
+ dict["dtype"] = dtype;
+ if (container != null)
+ dict["container"] = container;
+ if (shared_name != null)
+ dict["shared_name"] = shared_name;
+ var op = _op_def_lib._apply_op_helper("VariableV2", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Returns locations of nonzero / true values in a tensor.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Where'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// This operation returns the coordinates of true elements in condition. The
+ /// coordinates are returned in a 2-D tensor where the first dimension (rows)
+ /// represents the number of true elements, and the second dimension (columns)
+ /// represents the coordinates of the true elements. Keep in mind that the shape of
+ /// the output tensor can vary depending on how many true values there are in
+ /// condition. Indices are output in row-major order.
+ ///
+ /// For example:
+ ///
+ ///
+ /// # 'input' tensor is [[True, False]
+ /// # [True, False]]
+ /// # 'input' has two true values, so output has two coordinates.
+ /// # 'input' has rank of 2, so coordinates have two indices.
+ /// where(input) ==&gt; [[0, 0],
+ /// [1, 0]]
+ ///
+ /// # condition tensor is [[[True, False]
+ /// # [True, False]]
+ /// # [[False, True]
+ /// # [False, True]]
+ /// # [[False, False]
+ /// # [False, True]]]
+ /// # 'input' has 5 true values, so output has 5 coordinates.
+ /// # 'input' has rank of 3, so coordinates have three indices.
+ /// where(input) ==&gt; [[0, 0, 0],
+ /// [0, 1, 0],
+ /// [1, 0, 1],
+ /// [1, 1, 1],
+ /// [2, 1, 1]]
+ ///
+ /// # condition tensor is [[[1.5, 0.0]
+ /// # [-0.5, 0.0]]
+ /// # [[0.0, 0.25]
+ /// # [0.0, 0.75]]
+ /// # [[0.0, 0.0]
+ /// # [0.0, 0.01]]]
+ /// # 'input' has 5 nonzero values, so output has 5 coordinates.
+ /// # 'input' has rank of 3, so coordinates have three indices.
+ /// where(input) ==&gt; [[0, 0, 0],
+ /// [0, 1, 0],
+ /// [1, 0, 1],
+ /// [1, 1, 1],
+ /// [2, 1, 1]]
+ ///
+ /// # condition tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
+ /// # [0.0 + 0.5j, 0.0 + 0.0j]]
+ /// # [[0.0 + 0.0j, 0.25 + 1.5j]
+ /// # [0.0 + 0.0j, 0.75 + 0.0j]]
+ /// # [[0.0 + 0.0j, 0.0 + 0.0j]
+ /// # [0.0 + 0.0j, 0.01 + 0.0j]]]
+ /// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
+ /// # 'input' has rank of 3, so coordinates have three indices.
+ /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// + /// + public static Tensor where (Tensor input, string name = "Where") + { + var dict = new Dictionary(); + dict["input"] = input; + var op = _op_def_lib._apply_op_helper("Where", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the entire contents of a file as a value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'WholeFileReader'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// To use, enqueue filenames in a Queue. The output of ReaderRead will + /// be a filename (key) and the contents of that file (value). + /// + public static Tensor whole_file_reader (string container = null, string shared_name = null, string name = "WholeFileReader") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("WholeFileReader", name: name, keywords: dict); + return op.output; + } + + /// + /// A Reader that outputs the entire contents of a file as a value. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'WholeFileReaderV2'. + /// + /// + /// If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// + /// + /// If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// + /// The handle to reference the Reader. + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// To use, enqueue filenames in a Queue. The output of ReaderRead will + /// be a filename (key) and the contents of that file (value). + /// + public static Tensor whole_file_reader_v2 (string container = null, string shared_name = null, string name = "WholeFileReaderV2") + { + var dict = new Dictionary(); + if (container != null) + dict["container"] = container; + if (shared_name != null) + dict["shared_name"] = shared_name; + var op = _op_def_lib._apply_op_helper("WholeFileReaderV2", name: name, keywords: dict); + return op.output; + } + + /// + /// Worker heartbeat op. + /// + /// + /// A string tensor containing a serialized WorkerHeartbeatRequest + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'WorkerHeartbeat'. + /// + /// + /// A string tensor containing a serialized WorkerHeartbeatResponse + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Heartbeats may be sent periodically to indicate the coordinator is still active, + /// to retrieve the current worker status and to expedite shutdown when necessary. 
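+ ///
+ /// A hypothetical call sketch (request is assumed to hold a serialized
+ /// WorkerHeartbeatRequest produced elsewhere):
+ ///
+ /// var response = gen_ops.worker_heartbeat(request);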
+ ///
+ public static Tensor worker_heartbeat (Tensor request, string name = "WorkerHeartbeat")
+ {
+ var dict = new Dictionary();
+ dict["request"] = request;
+ var op = _op_def_lib._apply_op_helper("WorkerHeartbeat", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Writes contents to the file at input filename.
+ ///
+ ///
+ /// scalar. The name of the file to which we write the contents.
+ ///
+ ///
+ /// scalar. The content to be written to the output file.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'WriteFile'.
+ ///
+ ///
+ /// Returns the description of the operation
+ ///
+ ///
+ /// Creates the file, and recursively creates the directory, if either does not exist.
+ ///
+ public static Operation write_file (Tensor filename, Tensor contents, string name = "WriteFile")
+ {
+ var dict = new Dictionary();
+ dict["filename"] = filename;
+ dict["contents"] = contents;
+ var op = _op_def_lib._apply_op_helper("WriteFile", name: name, keywords: dict);
+ return op;
+ }
+
+ ///
+ /// Returns a tensor of zeros with the same shape and type as x.
+ ///
+ ///
+ /// a tensor of type T.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ZerosLike'.
+ ///
+ ///
+ /// a tensor of the same shape and type as x but filled with zeros.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor zeros_like (Tensor x, string name = "ZerosLike")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ var op = _op_def_lib._apply_op_helper("ZerosLike", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Zeta'.
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// The Hurwitz zeta function is defined as:
+ ///
+ ///
+ /// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
+ ///
+ public static Tensor zeta (Tensor x, Tensor q, string name = "Zeta")
+ {
+ var dict = new Dictionary();
+ dict["x"] = x;
+ dict["q"] = q;
+ var op = _op_def_lib._apply_op_helper("Zeta", name: name, keywords: dict);
+ return op.output;
+ }
+
+ ///
+ /// Creates a dataset that zips together input_datasets.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ZipDataset'.
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// Optional argument
+ ///
+ ///
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ public static Tensor zip_dataset (Tensor[] input_datasets, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ZipDataset")
+ {
+ var dict = new Dictionary();
+ dict["input_datasets"] = input_datasets;
+ dict["output_types"] = output_types;
+ dict["output_shapes"] = output_shapes;
+ var op = _op_def_lib._apply_op_helper("ZipDataset", name: name, keywords: dict);
+ return op.output;
+ }
+ }
+}
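+
+ // Illustrative usage of the wrappers above (a hedged sketch: the tensors
+ // x and q are assumed to be created elsewhere, e.g. from constants, and
+ // are not defined in this file):
+ //
+ //   var z = Tensorflow.Operations.gen_ops.zeta(x, q);        // element-wise Hurwitz zeta
+ //   var zeros = Tensorflow.Operations.gen_ops.zeros_like(x); // zeros with x's shape and dtype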