/// <summary>
/// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the
/// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer,
/// and bias is a bias vector created by the layer (only applicable if use_bias is True).
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix.</param>
/// <param name="bias_initializer">Initializer for the bias vector.</param>
/// <param name="kernel_regularizer">Regularizer instance for the kernel matrix (callable).</param>
/// <param name="bias_regularizer">Regularizer instance for the bias (callable).</param>
/// <param name="activity_regularizer">Regularizer instance for the output (callable).</param>
/// <param name="kernel_constraint">Constraint function for the kernel matrix.</param>
/// <param name="bias_constraint">Constraint function for the bias.</param>
/// <param name="input_shape">N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).</param>
/// <returns>N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units).</returns>