@@ -63,7 +63,7 @@ namespace Tensorflow
trainable: trainable,
name: name);
return layer.apply(inputs);
return layer.apply(inputs).Item1;
}
/// <summary>
@@ -117,7 +117,7 @@ namespace Tensorflow
trainable: trainable,
name: name);
return layer.apply(inputs, training: training);
return layer.apply(inputs, training: training).Item1;
}
/// <summary>
@@ -143,7 +143,7 @@ namespace Tensorflow
data_format: data_format,
name: name);
return layer.apply(inputs);
return layer.apply(inputs).Item1;
}
/// <summary>
@@ -179,7 +179,7 @@ namespace Tensorflow
kernel_initializer: kernel_initializer,
trainable: trainable);
return layer.apply(inputs);
return layer.apply(inputs).Item1;
}
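The wrappers above now end in `.Item1` because `Layer.apply` returns a `(Tensor, Tensor)` pair (see the `Tensorflow.Layers` hunk further down) while the public `tf.layers.*` helpers keep returning a single `Tensor`. A minimal stand-alone sketch of that tuple-unwrapping pattern, using a hypothetical `Apply` stand-in rather than the real TF.NET API:

```csharp
using System;

class TupleUnwrapSketch
{
    // Hypothetical stand-in for Layer.apply, which now yields (output, extra).
    static (int output, int extra) Apply(int input) => (input * 2, input);

    static void Main()
    {
        Console.WriteLine(Apply(21).Item1);     // 42, the form used by the wrappers above
        var (output, _) = Apply(21);            // equivalent deconstruction
        Console.WriteLine(output);
    }
}
```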
/// <summary>
@@ -76,7 +76,7 @@ namespace Tensorflow
/// <param name="swap_memory"></param>
/// <param name="time_major"></param>
/// <returns>A pair (outputs, state)</returns>
public (Tensor, Tensor) dynamic_rnn(RNNCell cell, Tensor inputs,
public (Tensor, Tensor) dynamic_rnn(RnnCell cell, Tensor inputs,
Tensor sequence_length = null, TF_DataType dtype = TF_DataType.DtInvalid,
int? parallel_iterations = null, bool swap_memory = false, bool time_major = false)
=> rnn.dynamic_rnn(cell, inputs, sequence_length: sequence_length, dtype: dtype,
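With the cell parameter retyped to the renamed `RnnCell`, a call against this signature might look as follows. This is a hedged sketch: only the `dynamic_rnn` signature and the `BasicRNNCell` factory come from this diff; the exposure points (`tf.nn.dynamic_rnn`, `tf.nn.rnn_cell`) and the placeholder shapes are assumptions.

```csharp
using Tensorflow;
using static Tensorflow.Binding;

// Assumes dynamic_rnn is reachable as tf.nn.dynamic_rnn and the rnn_cell_impl
// factory as tf.nn.rnn_cell, mirroring upstream TensorFlow naming.
var x = tf.placeholder(tf.float32, new TensorShape(-1, 10, 8));        // (batch, time, features)
var cell = tf.nn.rnn_cell.BasicRNNCell(16);                            // factory shown later in this diff
var (outputs, state) = tf.nn.dynamic_rnn(cell, x, dtype: tf.float32);  // the documented (outputs, state) pair
```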
@@ -18,6 +18,7 @@ using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
using Tensorflow.Operations;
using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -262,15 +262,11 @@ namespace Tensorflow
if (string.IsNullOrEmpty(name))
name = op_type;
// If a names ends with a '/' it is a "name scope" and we use it as-is,
// after removing the trailing '/'.
name = name.EndsWith("/") ? ops.name_from_scope_name(name) : unique_name(name);
var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs);
if (name.Contains("define_loss/bigger_box_loss/mul_13"))
{
}
var input_ops = inputs.Select(x => x.op).ToArray();
var control_inputs = _control_dependencies_for_inputs(input_ops);
@@ -377,7 +373,11 @@ namespace Tensorflow
/// <returns>A string to be passed to `create_op()` that will be used
/// to name the operation being created.</returns>
public string unique_name(string name, bool mark_as_used = true)
{
{
if (name.EndsWith("basic_r_n_n_cell"))
{
}
if (!String.IsNullOrEmpty(_name_stack))
name = _name_stack + "/" + name;
// For the sake of checking for names in use, we treat names as case
@@ -405,7 +405,7 @@ namespace Tensorflow
// Return the new name with the original capitalization of the given name.
name = $"{name}_{i-1}";
}
}
return name;
}
@@ -16,6 +16,7 @@
using System.Collections.Generic;
using Tensorflow.Operations;
using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -4,8 +4,8 @@ using System.Text;
namespace Tensorflow
{
public interface IPackable
public interface IPackable<T>
{
void Pack(object[] sequences);
T Pack(object[] sequences);
}
}
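Implementing the new generic contract now means returning a repacked instance instead of mutating in place. A short illustrative implementer (`Pair` is a made-up example type):

```csharp
using Tensorflow;   // IPackable<T> lives in the Tensorflow namespace per this hunk

public class Pair : IPackable<Pair>
{
    public object First  { get; set; }
    public object Second { get; set; }

    // Pack now returns a fresh instance rather than mutating `this`.
    public Pair Pack(object[] sequences)
        => new Pair { First = sequences[0], Second = sequences[1] };
}
```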
@@ -139,14 +139,14 @@ namespace Tensorflow.Keras.Layers
built = true;
}
protected override Tensor call(Tensor inputs, Tensor training = null)
protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
Tensor outputs = null;
if (fused)
{
outputs = _fused_batch_norm(inputs, training: training);
return outputs;
return (outputs, outputs);
}
throw new NotImplementedException("BatchNormalization call");
@@ -108,7 +108,7 @@ namespace Tensorflow.Keras.Layers
built = true;
}
protected override Tensor call(Tensor inputs, Tensor training = null)
protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
var outputs = _convolution_op.__call__(inputs, kernel);
if (use_bias)
@@ -124,9 +124,9 @@ namespace Tensorflow.Keras.Layers
}
if (activation != null)
return activation.Activate(outputs);
outputs = activation.Activate(outputs);
return outputs;
return (outputs, outputs);
}
}
}
@@ -72,7 +72,7 @@ namespace Tensorflow.Keras.Layers
built = true;
}
protected override Tensor call(Tensor inputs, Tensor training = null)
protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
Tensor outputs = null;
var rank = inputs.rank;
@@ -88,9 +88,9 @@ namespace Tensorflow.Keras.Layers
if (use_bias)
outputs = tf.nn.bias_add(outputs, bias);
if (activation != null)
return activation.Activate(outputs);
outputs = activation.Activate(outputs);
return outputs;
return (outputs, outputs);
}
}
}
@@ -50,14 +50,14 @@ namespace Tensorflow.Keras.Layers
built = true;
}
protected override Tensor call(Tensor inputs, Tensor training = null)
protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
var dtype = inputs.dtype;
if (dtype != tf.int32 && dtype != tf.int64)
inputs = math_ops.cast(inputs, tf.int32);
var @out = embedding_ops.embedding_lookup(embeddings, inputs);
return @out;
return (@out, @out);
}
}
}
@@ -101,7 +101,7 @@ namespace Tensorflow.Keras.Layers
_inbound_nodes = new List<Node>();
}
public Tensor __call__(Tensor[] inputs,
public (Tensor, Tensor) __call__(Tensor[] inputs,
Tensor training = null,
VariableScope scope = null)
{
@@ -139,14 +139,14 @@ namespace Tensorflow.Keras.Layers
// overridden).
_maybe_build(inputs[0]);
outputs = call(inputs[0], training: training);
(input, outputs) = call(inputs[0], training: training);
(input, outputs) = _set_connectivity_metadata_(input, outputs);
_handle_activity_regularization(inputs[0], outputs);
_set_mask_metadata(inputs[0], outputs, null);
});
}
return outputs;
return (input, outputs);
}
private (Tensor, Tensor) _set_connectivity_metadata_(Tensor inputs, Tensor outputs)
@@ -173,9 +173,9 @@ namespace Tensorflow.Keras.Layers
return null;
}
protected virtual Tensor call(Tensor inputs, Tensor training = null)
protected virtual (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
return inputs;
return (inputs, inputs);
}
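Any subclass overriding `call` has to adopt the pair-returning signature. A hedged sketch of a trivial pass-through layer; the base class name `Layer` and its parameterless constructor are assumptions for illustration:

```csharp
using Tensorflow;
using Tensorflow.Keras.Layers;   // assumed home of the Layer base class

class PassThrough : Layer
{
    // Signature per this diff: returns (primary output, secondary tensor).
    protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
    {
        // A real layer would transform `inputs` here; the convention used by
        // Dense/Conv2D/Embedding above is to return the output twice.
        return (inputs, inputs);
    }
}
```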
protected virtual string _name_scope()
@@ -43,7 +43,7 @@ namespace Tensorflow.Keras.Layers
this.input_spec = new InputSpec(ndim: 4);
}
protected override Tensor call(Tensor inputs, Tensor training = null)
protected override (Tensor, Tensor) call(Tensor inputs, Tensor training = null)
{
int[] pool_shape;
if (data_format == "channels_last")
@@ -64,7 +64,7 @@ namespace Tensorflow.Keras.Layers
padding: padding.ToUpper(),
data_format: conv_utils.convert_data_format(data_format, 4));
return outputs;
return (outputs, outputs);
}
}
}
@@ -47,12 +47,12 @@ namespace Tensorflow.Layers
_keras_style = false;
}
public virtual Tensor apply(Tensor inputs, Tensor training = null)
public virtual (Tensor, Tensor) apply(Tensor inputs, Tensor training = null)
{
return __call__(inputs, training: training);
}
public Tensor __call__(Tensor inputs,
public (Tensor, Tensor) __call__(Tensor inputs,
Tensor training = null,
VariableScope scope = null)
{
@@ -71,7 +71,7 @@ namespace Tensorflow.Layers
auxiliary_name_scope: false);
}
Tensor outputs = null;
(Tensor, Tensor) outputs = (null, null);
tf_with(scope_context_manager, scope2 =>
{
_current_scope = scope2;
@@ -16,18 +16,23 @@
using System;
using Tensorflow.Keras.Engine;
using static Tensorflow.Binding;
namespace Tensorflow
{
public class BasicRNNCell : LayerRNNCell
public class BasicRnnCell : LayerRnnCell
{
int _num_units;
Func<Tensor, string, Tensor> _activation;
public override int state_size => _num_units;
public override int output_size => _num_units;
public VariableV1 _kernel;
string _WEIGHTS_VARIABLE_NAME = "kernel";
public VariableV1 _bias;
string _BIAS_VARIABLE_NAME = "bias";
public BasicRNNCell(int num_units,
public BasicRnnCell(int num_units,
Func<Tensor, string, Tensor> activation = null,
bool? reuse = null,
string name = null,
@@ -44,5 +49,29 @@ namespace Tensorflow
else
_activation = activation;
}
protected override void build(TensorShape inputs_shape)
{
var input_depth = inputs_shape.dims[inputs_shape.ndim - 1];
_kernel = add_weight(
_WEIGHTS_VARIABLE_NAME,
shape: new[] { input_depth + _num_units, _num_units });
_bias = add_weight(
_BIAS_VARIABLE_NAME,
shape: new[] { _num_units },
initializer: tf.zeros_initializer);
built = true;
}
protected override (Tensor, Tensor) call(Tensor inputs, Tensor state = null)
{
// Most basic RNN: output = new_state = act(W * input + U * state + B).
var concat = array_ops.concat(new[] { inputs, state }, 1);
var gate_inputs = math_ops.matmul(concat, _kernel as RefVariable);
return (inputs, inputs);
}
}
}
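The `call` above is still a stub that computes `gate_inputs` and then returns `(inputs, inputs)`. A hedged sketch of where it is heading, following the in-code comment `output = new_state = act(W * input + U * state + B)`; the `bias_add` cast and the `_activation(..., null)` invocation are assumptions, not part of this commit:

```csharp
protected override (Tensor, Tensor) call(Tensor inputs, Tensor state = null)
{
    // Most basic RNN: output = new_state = act(W * input + U * state + B).
    var concat = array_ops.concat(new[] { inputs, state }, 1);
    var gate_inputs = math_ops.matmul(concat, _kernel as RefVariable);
    gate_inputs = tf.nn.bias_add(gate_inputs, _bias as RefVariable);   // mirrors the Dense layer's bias_add above
    var output = _activation(gate_inputs, null);                       // _activation is Func<Tensor, string, Tensor>
    return (output, output);                                           // the output doubles as the new state
}
```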
@@ -136,27 +136,6 @@ namespace Tensorflow.Operations
graph._set_control_flow_context(this);
}
protected virtual Tensor _Enter(Tensor data, string frame_name,
bool is_constant = false,
int parallel_iterations = 10,
bool use_ref = true,
bool use_input_shape = true,
string name = null)
{
Tensor result;
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref: true);
if (data.dtype.is_ref_dtype() && use_ref)
throw new NotImplementedException("_Enter");
else
result = gen_control_flow_ops.enter(
data, frame_name, is_constant, parallel_iterations, name: name);
if (use_input_shape)
result.set_shape(data.TensorShape);
return result;
}
/// <summary>
/// Exit this control flow context.
/// </summary>
@@ -5,7 +5,7 @@ using System.Text;
namespace Tensorflow.Operations
{
internal class LoopVar<TItem> : ICanBeFlattened, IPackable
internal class LoopVar<TItem> : ICanBeFlattened, IPackable<LoopVar<TItem>>
{
public Tensor Counter { get; set; }
public TItem Item { get; set; }
@@ -26,11 +26,13 @@ namespace Tensorflow.Operations
return elements.ToArray();
}
public void Pack(object[] sequences)
public LoopVar<TItem> Pack(object[] sequences)
{
Counter = sequences[0] as Tensor;
if (typeof(TItem).GetInterface(typeof(IPackable).Name) != null)
(Item as IPackable).Pack(sequences.Skip(1).ToArray());
var counter = sequences[0] as Tensor;
var item = default(TItem);
if (typeof(TItem).GetInterface(typeof(IPackable<TItem>).Name) != null)
item = (Item as IPackable<TItem>).Pack(sequences.Skip(1).ToArray());
return new LoopVar<TItem>(counter, item);
}
public static implicit operator (Tensor, TItem)(LoopVar<TItem> loopVar)
@@ -184,7 +184,7 @@ namespace Tensorflow.Operations
Tensor[] enter_vars = null;
tf_with(ops.control_dependencies(null), delegate
{
enter_vars = real_vars.Select(x => _Enter(x,
enter_vars = real_vars.Select(x => control_flow_ops._Enter(x,
_name,
is_constant: false,
parallel_iterations: _parallel_iterations,
@@ -294,6 +294,10 @@ namespace Tensorflow.Operations
}
}
/// <summary>
/// Makes the values known to this context.
/// </summary>
/// <param name="values"></param>
private void _InitializeValues(Tensor[] values)
{
_values = new HashSet<string>();
@@ -303,8 +307,14 @@ namespace Tensorflow.Operations
protected override void _AddOpInternal(Operation op)
{
if(op.name == "rnn/while/basic_rnn_cell/MatMul" ||
op.name == "rnn/while/TensorArrayReadV3")
{
}
Operation[] external_inputs = new Operation[0];
if (op == null)
if (op.inputs.Length == 0)
{
throw new NotImplementedException("");
}
@@ -374,6 +384,11 @@ namespace Tensorflow.Operations
_AddOpInternal(op);
}
/// <summary>
/// Add `val` to the current context and its outer context recursively.
/// </summary>
/// <param name="val"></param>
/// <returns></returns>
public override Tensor AddValue(Tensor val)
{
var result = val;
@@ -403,9 +418,9 @@ namespace Tensorflow.Operations
// Create an Enter to make `result` known to this loop context.
Tensor enter = null;
tf_with(ops.control_dependencies(new ITensorOrOperation[0]), delegate
tf_with(ops.control_dependencies(null), delegate
{
enter = _Enter(
enter = control_flow_ops._Enter(
result,
_name,
is_constant: true,
@@ -16,9 +16,9 @@
namespace Tensorflow
{
public class LayerRNNCell : RNNCell
public class LayerRnnCell : RnnCell
{
public LayerRNNCell(bool? _reuse = null,
public LayerRnnCell(bool? _reuse = null,
string name = null,
TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: _reuse,
name: name,
@@ -20,8 +20,8 @@ namespace Tensorflow.Operations
{
public class rnn_cell_impl
{
public BasicRNNCell BasicRNNCell(int num_units)
=> new BasicRNNCell(num_units);
public BasicRnnCell BasicRNNCell(int num_units)
=> new BasicRnnCell(num_units);
public static Tensor _concat(Tensor prefix, int suffix, bool @static = false)
{
@@ -228,6 +228,15 @@ namespace Tensorflow
output_types.AddRange(types);
}
// We add an explicit colocation constraint between
// the newly created op and any of its reference-typed inputs.
var must_colocate_inputs = zip(op_def.InputArg, inputs)
.Where(x => x.Item1.IsRef)
.Select(x => x.Item2)
.ToArray();
_MaybeColocateWith(must_colocate_inputs);
// Add Op to graph
var op = g.create_op(op_type_name,
inputs.ToArray(),
@@ -241,6 +250,11 @@ namespace Tensorflow
});
}
private void _MaybeColocateWith(ITensorOrOperation[] inputs)
{
}
private void SetAttrs(string op_type_name,
ArgDef input_arg,
OpDef op_def,
@@ -15,6 +15,7 @@
******************************************************************************/
using Tensorflow.Operations;
using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -44,14 +44,14 @@ namespace Tensorflow
[JsonIgnore]
#endif
public int NumInputs => c_api.TF_OperationNumInputs(_handle);
private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray();
private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray();
private InputList _inputs;
private InputList _inputs_val;
public InputList inputs
{
get
{
if (_inputs == null)
if (_inputs_val == null)
{
var retval = new Tensor[NumInputs];
@@ -62,10 +62,10 @@ namespace Tensorflow
retval[i] = op.outputs[tf_output.index];
}
_inputs = new InputList(retval);
_inputs_val = new InputList(retval);
}
return _inputs;
return _inputs_val;
}
}
@@ -175,8 +175,8 @@ namespace Tensorflow
// Dict mapping op name to file and line information for op colocation
// context managers.
_control_flow_context = graph._get_control_flow_context();
_control_flow_context = graph._get_control_flow_context();
// This will be set by self.inputs.
if (op_def == null)
op_def = g.GetOpDef(node_def.Op);
@@ -305,7 +305,7 @@ namespace Tensorflow
var output = tensor._as_tf_output();
// Reset cached inputs.
_inputs = null;
_inputs_val = null;
// after the c_api call next time _inputs is accessed
// the updated inputs are reloaded from the c_api
lock (Locks.ProcessWide)
@@ -675,5 +675,36 @@ namespace Tensorflow
throw new NotImplementedException("while_loop");
}
/// <summary>
/// Creates or finds a child frame, and makes `data` available to it.
/// </summary>
/// <param name="data"></param>
/// <param name="frame_name"></param>
/// <param name="is_constant"></param>
/// <param name="parallel_iterations"></param>
/// <param name="use_ref"></param>
/// <param name="use_input_shape"></param>
/// <param name="name"></param>
/// <returns></returns>
public static Tensor _Enter(Tensor data, string frame_name,
bool is_constant = false,
int parallel_iterations = 10,
bool use_ref = true,
bool use_input_shape = true,
string name = null)
{
Tensor result;
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref: true);
if (data.dtype.is_ref_dtype() && use_ref)
throw new NotImplementedException("_Enter");
else
result = gen_control_flow_ops.enter(
data, frame_name, is_constant, parallel_iterations, name: name);
if (use_input_shape)
result.set_shape(data.TensorShape);
return result;
}
}
}
@@ -568,7 +568,7 @@ namespace Tensorflow
{
var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
return _op.outputs[0];
return _op.output;
}
/// <summary>
@@ -543,6 +543,23 @@ namespace Tensorflow
public static Tensor maximum<Tx, Ty>(Tx x, Ty y, string name = null)
=> gen_math_ops.maximum(x, y, name: name);
/// <summary>
/// Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
/// </summary>
/// <param name="a"></param>
/// <param name="b"></param>
/// <param name="transpose_a">If `True`, `a` is transposed before multiplication.</param>
/// <param name="transpose_b">If `True`, `b` is transposed before multiplication.</param>
/// <param name="adjoint_a">If `True`, `a` is conjugated and transposed before multiplication.</param>
/// <param name="adjoint_b">If `True`, `b` is conjugated and transposed before multiplication.</param>
/// <param name="a_is_sparse">If `True`, `a` is treated as a sparse matrix.</param>
/// <param name="b_is_sparse">If `True`, `b` is treated as a sparse matrix.</param>
/// <param name="name">Name for the operation (optional).</param>
/// <returns>
/// A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
/// the product of the corresponding matrices in `a` and `b`, e.g. if all
/// transpose or adjoint attributes are `False`:
/// </returns>
public static Tensor matmul(Tensor a, Tensor b,
bool transpose_a = false, bool transpose_b = false,
bool adjoint_a = false, bool adjoint_b = false,
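A short usage sketch of the newly documented overload. `math_ops.matmul` is the same call site `BasicRnnCell` uses earlier in this diff; the placeholder shapes and the use of `tf.placeholder` are illustrative assumptions:

```csharp
using Tensorflow;
using static Tensorflow.Binding;

var a = tf.placeholder(tf.float32, new TensorShape(2, 3));
var b = tf.placeholder(tf.float32, new TensorShape(4, 3));
// (2,3) x (3,4): transpose_b flips b before the multiply, per the new doc comment.
var product = math_ops.matmul(a, b, transpose_b: true);
```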
@@ -402,11 +402,8 @@ namespace Tensorflow.Util
private static int len(IEnumerable<object> x) => x.Count();
public static T pack_sequence_as2<T>(T structure, object[] flat_sequence, bool expand_composites = false)
where T : IPackable
{
structure.Pack(flat_sequence);
return structure;
}
where T : IPackable<T>
=> structure.Pack(flat_sequence);
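With the generic constraint, the helper can hand the repacked structure back directly. A hedged usage sketch; the containing class name `nest` is an assumption, and `Point` is a made-up packable type:

```csharp
using Tensorflow;        // IPackable<T>
using Tensorflow.Util;   // assumed namespace of pack_sequence_as2

class Point : IPackable<Point>
{
    public object X, Y;
    public Point Pack(object[] seq) => new Point { X = seq[0], Y = seq[1] };
}

// The repacked value now comes straight out of the call:
var packed = nest.pack_sequence_as2(new Point(), new object[] { 1, 2 });
```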
/// <summary>
/// Returns a given flattened sequence packed into a given structure.
@@ -508,6 +508,8 @@ namespace Tensorflow
return null;
case TensorShape ts:
return constant_op.constant(ts.dims, dtype: dtype, name: name);
case int[] dims:
return constant_op.constant(dims, dtype: dtype, name: name);
case object[] objects:
return array_ops._autopacking_conversion_function(objects, dtype: dtype, name: name);
default:
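The new `int[]` case lets a plain integer array (for example a shape) flow through this conversion switch and become a constant tensor. A hedged sketch, assuming the switch sits on the `ops.convert_to_tensor` path:

```csharp
using Tensorflow;

// An int[] now maps to a constant tensor via constant_op.constant, per the new case.
var dims = new[] { 2, 3, 4 };
var shapeTensor = ops.convert_to_tensor(dims);
```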
@@ -45,7 +45,10 @@ namespace Tensorflow
public void __enter__()
{
_name = _name ?? _default_name;
if (_name.EndsWith("basic_r_n_n_cell"))
{
}
Graph g = null;
if (_values is List<Tensor> vList)