@@ -0,0 +1,124 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Gradients;
using static Tensorflow.Binding;
using static Tensorflow.tensorflow;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        bool RecordGradient(string op_name,
            Tensor[] inputs,
            object[] attrs,
            Tensor[] results)
        {
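            // Recording flow (ported from pywrap_tfe_src.cc): first ask every active
            // GradientTape whether it is watching any of the input tensors; if none
            // is, there is nothing to record and we return early.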
            var input_ids = MakeTensorIDList(inputs);
            var input_dtypes = MakeTensorDtypeList(inputs);

            bool should_record = false;
            foreach (var tape in tf.GetTapeSet())
            {
                if (tape.ShouldRecord(input_ids, input_dtypes))
                {
                    should_record = true;
                    break;
                }
            }

            if (!should_record)
            {
                /*for (TFE_Py_ForwardAccumulator* accumulator : SafeAccumulatorSet())
                {
                    if (accumulator->accumulator->ShouldRecord(input_ids, input_dtypes))
                    {
                        should_record = true;
                        break;
                    }
                }*/
            }
            if (!should_record)
                return false;
            Tensor[] op_outputs;
            bool op_outputs_tuple_created = false;
            var unused_output_indices = gradient_exclustions.OpGradientUnusedOutputIndices(op_name);
            if (unused_output_indices != null)
            {
                if (unused_output_indices.Length == 0)
                    op_outputs = new Tensor[0];
                else
                {
                    op_outputs_tuple_created = true;
                    // op_outputs = CopySequenceSettingIndicesToNull(results, *unused_output_indices);
                }
            }
            else
                op_outputs = results;

            Tensor[] op_inputs;
            bool op_inputs_tuple_created = false;
            var unused_input_indices = gradient_exclustions.OpGradientUnusedInputIndices(op_name);
            if (unused_input_indices != null)
            {
                if (unused_input_indices.Length == 0)
                    op_inputs = new Tensor[0];
                else
                {
                    op_inputs_tuple_created = true;
                    // op_inputs = CopySequenceSettingIndicesToNull(inputs, *unused_input_indices);
                }
            }
            else
                op_inputs = inputs;

            TapeSetRecordOperation(op_name, inputs, results, input_ids, input_dtypes,
                () => GetGradientFunction(op_name, inputs, attrs, results));

            return true;
        }
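        // Builds the backward function for a recorded op. When the tape later replays
        // the op, the gradient function registered for op_name in ops.gradientFunctions
        // is invoked with an EagerOperation describing the forward op and the incoming
        // output gradients; unneeded_gradients lets the gradient function skip inputs
        // whose gradients are not required.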
        BackwardFunction GetGradientFunction(string op_name,
            Tensor[] op_inputs,
            object[] attrs,
            Tensor[] op_outputs)
            => (output_grads, unneeded_gradients) =>
            {
                var gradients = ops.gradientFunctions[op_name](new EagerOperation
                {
                    Name = op_name,
                    NumInputs = op_inputs.Length,
                    Inputs = op_inputs,
                    NumOutputs = op_outputs.Length,
                    Outputs = op_outputs,
                    SkipInputIndices = unneeded_gradients,
                    Attrs = attrs
                }, output_grads);

                return gradients;
            };

        bool CouldForwardprop()
        {
            return HasAccumulator();
        }

        bool CouldBackprop()
        {
            return HasGradientTape();
        }

        long[] MakeTensorIDList(Tensor[] tensors)
        {
            return tensors.Select(x => x.Id).ToArray();
        }

        TF_DataType[] MakeTensorDtypeList(Tensor[] tensors)
        {
            return tensors.Select(x => x.dtype).ToArray();
        }
    }
}
@@ -0,0 +1,32 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        bool RunCallbacks(FastPathOpExecInfo op_exec_info,
            int num_inferred_attrs,
            Tensor[] inputs,
            object[] attrs,
            Tensor[] flattened_result)
        {
            if (op_exec_info.run_gradient_callback)
            {
                if (!RecordGradient(op_exec_info.op_name, inputs, attrs,
                    flattened_result))
                {
                    return false;
                }
            }
            if (op_exec_info.run_post_exec_callbacks)
            {
                // TODO: post-execution callbacks are not implemented yet.
            }

            return true;
        }
    }
}
@@ -0,0 +1,61 @@
using System.Collections.Generic;
using System.Linq;
using System;
using static Tensorflow.Binding;

namespace Tensorflow.Eager
{
    /// <summary>
    /// python\eager\pywrap_tfe_src.cc
    /// </summary>
    public partial class EagerRunner
    {
        public Tensor[] TFE_Execute(Context ctx,
            string device_name,
            string op_name,
            Tensor[] inputs,
            object[] attrs,
            int num_outputs)
            => TFE_ExecuteCancelable(ctx, device_name, op_name, inputs, attrs, num_outputs);
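        // Slow-path execution of a single eager op: obtain (or reset) a cached TFE_Op,
        // pin it to the requested device, add each input as a tensor handle, set the
        // attributes, then call TFE_Execute and wrap the returned handles as EagerTensors.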
        public Tensor[] TFE_ExecuteCancelable(Context ctx,
            string device_name,
            string op_name,
            Tensor[] inputs,
            object[] attrs,
            int num_outputs)
        {
            var status = tf.status;
            var op = GetOp(ctx, op_name, status);
            status.Check(true);
            c_api.TFE_OpSetDevice(op, device_name, status.Handle);
            if (status.ok())
            {
                for (int i = 0; i < inputs.Length; ++i)
                {
                    IntPtr tensor_handle;
                    switch (inputs[i])
                    {
                        case EagerTensor et:
                            tensor_handle = et.EagerTensorHandle;
                            break;
                        default:
                            tensor_handle = c_api.TFE_NewTensorHandle(inputs[i], status.Handle);
                            break;
                    }
                    c_api.TFE_OpAddInput(op, tensor_handle, status.Handle);
                }
            }
            if (status.ok())
                SetOpAttrs(op, attrs);
            var outputs = new IntPtr[num_outputs];
            if (status.ok())
            {
                c_api.TFE_Execute(op, outputs, ref num_outputs, status.Handle);
                status.Check(true);
            }
            return outputs.Select(x => new EagerTensor(x)).ToArray();
        }
    }
}
@@ -0,0 +1,321 @@
using System.Collections.Generic;
using System.Linq;
using System;
using static Tensorflow.OpDef.Types;
using static Tensorflow.Binding;
using Google.Protobuf.WellKnownTypes;
using System.Threading;
using Tensorflow.Util;
using System.Runtime.InteropServices.ComTypes;

namespace Tensorflow.Eager
{
    /// <summary>
    /// python\eager\pywrap_tfe_src.cc
    /// </summary>
    public partial class EagerRunner
    {
        int kFastPathExecuteInputStartIndex = 0;
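        // Fast-path execution of an eager op. Based on how `args` is consumed below,
        // the expected layout is: the first op_def.InputArg.Count entries are the op
        // inputs, followed by attribute name/value pairs for non-inferred attrs.
        // A hypothetical call shape (illustrative only, not the library's public API):
        //   tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
        //       "Add", name, null, x, y);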
        public Tensor[] TFE_FastPathExecute(Context ctx,
            string device_name,
            string opName,
            string name,
            Action callbacks,
            params object[] args)
        {
            if (ctx == null)
                throw new ValueError("This function does not handle the case of the path where " +
                    "all inputs are not already EagerTensors.");

            int args_size = args.Length;
            var attr_list_sizes = new Dictionary<string, long>();

            FastPathOpExecInfo op_exec_info = new FastPathOpExecInfo()
            {
                ctx = ctx,
                args = args,
                device_name = device_name,
                op_name = opName,
                name = name,
            };

            op_exec_info.run_gradient_callback = HasAccumulatorOrTape();
            op_exec_info.run_post_exec_callbacks = callbacks != null;
            op_exec_info.run_callbacks = op_exec_info.run_gradient_callback || op_exec_info.run_post_exec_callbacks;

            var status = tf.status;
            var op = GetOp(ctx, opName, status);

            var op_def = tf.get_default_graph().GetOpDef(opName);

            // Set non-inferred attrs, including setting defaults if the attr is passed in
            // as None.
            for (int i = kFastPathExecuteInputStartIndex + op_def.InputArg.Count; i < args_size; i += 2)
            {
                var attr_name = args[i].ToString();
                var attr_value = args[i + 1];

                var attr = op_def.Attr.FirstOrDefault(x => x.Name == attr_name);
                if (attr != null)
                {
                    SetOpAttrWithDefaults(ctx, op, attr, attr_name, attr_value, attr_list_sizes, status);
                    status.Check(true);
                }
            }

            var flattened_inputs = args.Take(op_def.InputArg.Count)
                .Select(x => x as Tensor)
                .ToArray();
            var flattened_attrs = args.Skip(op_def.InputArg.Count).ToArray();

            c_api.TFE_OpSetDevice(op, device_name, status.Handle);
            status.Check(true);

            // Add inferred attrs and inputs.
            for (int i = 0; i < op_def.InputArg.Count; i++)
            {
                var input_arg = op_def.InputArg[i];
                if (!string.IsNullOrEmpty(input_arg.NumberAttr))
                {
                    int len = (args[kFastPathExecuteInputStartIndex + i] as object[]).Length;
                    c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len);
                    attr_list_sizes[input_arg.NumberAttr] = len;

                    if (len > 0)
                    {
                        var fast_input_array = (object[])args[kFastPathExecuteInputStartIndex + i];
                        // First item adds the type attr.
                        if (!AddInputToOp(fast_input_array[0], true, input_arg, op, status))
                            return null;

                        for (var j = 1; j < len; j++)
                        {
                            // Since the list is homogeneous, we don't need to re-add the attr.
                            if (!AddInputToOp(fast_input_array[j], false, input_arg, op, status))
                                return null;
                        }
                    }
                }
                else if (!string.IsNullOrEmpty(input_arg.TypeListAttr))
                {
                    // TODO: inputs with a type list attr are not handled yet.
                }
                else
                {
                    // The item is a single input tensor.
                    AddInputToOp(args[kFastPathExecuteInputStartIndex + i], true, input_arg, op, status);
                }
            }

            int num_retvals = 0;
            for (int i = 0; i < op_def.OutputArg.Count; i++)
            {
                var output_arg = op_def.OutputArg[i];
                var delta = 1L;
                if (!string.IsNullOrEmpty(output_arg.NumberAttr))
                    delta = attr_list_sizes[output_arg.NumberAttr];
                else if (!string.IsNullOrEmpty(output_arg.TypeListAttr))
                    delta = attr_list_sizes[output_arg.TypeListAttr];
                if (delta < 0)
                    throw new RuntimeError("Attributes suggest that the size of an output list is less than 0");
                num_retvals += (int)delta;
            }

            var retVals = new IntPtr[num_retvals];
            c_api.TFE_Execute(op, retVals, ref num_retvals, status.Handle);
            status.Check(true);

            var flat_result = retVals.Select(x => new EagerTensor(x)).ToArray();

            if (op_exec_info.run_callbacks)
            {
                if (!RunCallbacks(
                    op_exec_info,
                    kFastPathExecuteInputStartIndex + op_def.InputArg.Count(),
                    flattened_inputs, flattened_attrs, flat_result))
                {
                    return null;
                }
            }

            return flat_result;
        }
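        // Reuses a single TFE_Op handle per Context: the first call creates it with
        // TFE_NewOp and caches it; later calls reset the cached handle with TFE_OpReset
        // instead of allocating a new op for every execution.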
        TFE_Op GetOp(Context ctx, string op_or_function_name, Status status)
        {
            if (thread_local_eager_operation_map.find(ctx, out var op))
                c_api.TFE_OpReset(op, op_or_function_name, ctx.device_name, status.Handle);
            else
            {
                op = c_api.TFE_NewOp(ctx.Handle, op_or_function_name, status.Handle);
                thread_local_eager_operation_map[ctx] = op;
            }

            status.Check(true);
            return op;
        }

        static UnorderedMap<Context, TFE_Op> thread_local_eager_operation_map = new UnorderedMap<Context, TFE_Op>();
        bool HasAccumulator()
        {
            //return !GetAccumulatorSet()->empty();
            return false;
        }

        bool HasGradientTape()
        {
            return tf.GetTapeSet().Count > 0;
        }

        bool HasAccumulatorOrTape()
        {
            return HasGradientTape() || HasAccumulator();
        }

        /// <summary>
        /// Adds input and type attr to the op, and to the list of flattened
        /// inputs/attrs.
        /// </summary>
        /// <param name="inputs"></param>
        /// <param name="add_type_attr"></param>
        /// <param name="input_arg"></param>
        /// <param name="op"></param>
        /// <param name="status"></param>
        /// <returns></returns>
        bool AddInputToOp(object inputs,
            bool add_type_attr,
            ArgDef input_arg,
            IntPtr op,
            Status status)
        {
            IntPtr input_handle;

            // ConvertToTensor();
            switch (inputs)
            {
                case EagerTensor input:
                    input_handle = input.EagerTensorHandle;
                    break;
                case EagerTensor[] input_list:
                    input_handle = input_list[0].EagerTensorHandle;
                    break;
                default:
                    var tensor = tf.convert_to_tensor(inputs);
                    input_handle = (tensor as EagerTensor).EagerTensorHandle;
                    break;
            }

            if (add_type_attr && !string.IsNullOrEmpty(input_arg.TypeAttr))
            {
                var dtype = c_api.TFE_TensorHandleDataType(input_handle);
                c_api.TFE_OpSetAttrType(op, input_arg.TypeAttr, dtype);
            }

            c_api.TFE_OpAddInput(op, input_handle, status.Handle);
            status.Check(true);

            return true;
        }
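        // Applies attributes given as a flat key/value sequence, e.g.
        // { "transpose_a", false, "T", TF_DataType.TF_FLOAT } (illustrative values):
        // the attr type is queried from the op, then set as a scalar or a list.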
        public void SetOpAttrs(TFE_Op op, params object[] attrs)
        {
            var status = tf.status;
            var len = attrs.Length;
            for (int i = 0; i < len; i += 2)
            {
                var key = attrs[i].ToString();
                var value = attrs[i + 1];

                byte is_list = 0;
                var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle);
                if (!status.ok()) return;
                if (is_list != 0)
                    SetOpAttrList(tf.context, op, key, value, type, null, status);
                else
                    SetOpAttrScalar(tf.context, op, key, value, type, null, status);
                status.Check(true);
            }
        }

        /// <summary>
        /// This function will set the op attrs required. If an attr has the value of
        /// None, then it will read the AttrDef to get the default value and set that
        /// instead. Any failure in this function will simply fall back to the slow
        /// path.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="op"></param>
        /// <param name="attr"></param>
        /// <param name="attr_name"></param>
        /// <param name="attr_value"></param>
        /// <param name="attr_list_sizes"></param>
        /// <param name="status"></param>
        void SetOpAttrWithDefaults(Context ctx, IntPtr op, AttrDef attr,
            string attr_name, object attr_value,
            Dictionary<string, long> attr_list_sizes,
            Status status)
        {
            byte is_list = 0;
            var type = c_api.TFE_OpGetAttrType(op, attr_name, ref is_list, status.Handle);
            if (status.Code != TF_Code.TF_OK) return;

            if (attr_value == null)
            {
                if (is_list != 0)
                {
                    // TODO: SetOpAttrListDefault
                }
                else
                {
                    // TODO: SetOpAttrScalarDefault
                }
            }
            else
            {
                if (is_list != 0)
                {
                    // TODO: SetOpAttrList
                }
                else
                    SetOpAttrScalar(ctx, op, attr_name, attr_value, type, attr_list_sizes, status);
            }
        }
        bool SetOpAttrList(Context ctx, IntPtr op,
            string key, object value, TF_AttrType type,
            Dictionary<string, long> attr_list_sizes,
            Status status)
        {
            // TODO: list-valued attributes are not supported yet.
            return false;
        }
        bool SetOpAttrScalar(Context ctx, IntPtr op,
            string key, object value, TF_AttrType type,
            Dictionary<string, long> attr_list_sizes,
            Status status)
        {
            switch (type)
            {
                case TF_AttrType.TF_ATTR_STRING:
                    c_api.TFE_OpSetAttrString(op, key, value.ToString(), (uint)value.ToString().Length);
                    break;
                case TF_AttrType.TF_ATTR_TYPE:
                    c_api.TFE_OpSetAttrType(op, key, (TF_DataType)value);
                    break;
                case TF_AttrType.TF_ATTR_BOOL:
                    c_api.TFE_OpSetAttrBool(op, key, Convert.ToBoolean(value));
                    break;
                case TF_AttrType.TF_ATTR_INT:
                    c_api.TFE_OpSetAttrInt(op, key, Convert.ToInt64(value));
                    break;
                case TF_AttrType.TF_ATTR_SHAPE:
                    var dims = (value as int[]).Select(x => (long)x).ToArray();
                    c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status.Handle);
                    status.Check(true);
                    break;
                default:
                    throw new NotImplementedException($"SetOpAttrScalar for {type}");
            }

            return true;
        }
    }
}
@@ -0,0 +1,53 @@
using System.Collections.Generic;
using System.Linq;
using System;
using static Tensorflow.OpDef.Types;
using Tensorflow.Gradients;
using Tensorflow.Util;

namespace Tensorflow.Eager
{
    /// <summary>
    /// python\eager\pywrap_tfe_src.cc
    /// </summary>
    public partial class EagerRunner
    {
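        // Computes gradients of `target` with respect to `sources` on the given tape,
        // mirroring TFE_Py_TapeGradient. A typical caller would be a GradientTape-style
        // wrapper, e.g. (illustrative only):
        //   var grads = tf.Runner.TFE_TapeGradient(tape, new[] { loss }, new[] { w }, null);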
        public Tensor[] TFE_TapeGradient(ITape tape,
            Tensor[] target,
            Tensor[] sources,
            Tensor[] output_gradients)
        {
            var target_vec = MakeTensorIDList(target);
            var sources_vec = MakeTensorIDList(sources);
            var sources_set = sources_vec;

            var seq_array = target;
            var source_tensors_that_are_targets = new UnorderedMap<long, TapeTensor>();

            for (int i = 0; i < target.Length; ++i)
            {
                var target_id = target_vec[i];
                var tensor = seq_array[i];
                source_tensors_that_are_targets.Add(target_id, TapeTensorFromTensor(tensor));
            }

            if (output_gradients != null)
            {
                throw new NotImplementedException("");
            }
            else
            {
                output_gradients = new Tensor[0];
            }

            var outgrad_vec = MakeTensorList(output_gradients);

            return tape.ComputeGradient(target_vec, sources_vec, source_tensors_that_are_targets, outgrad_vec);
        }

        Tensor[] MakeTensorList(Tensor[] tensors)
        {
            return tensors;
        }
    }
}
@@ -0,0 +1,32 @@
using System;
using System.Collections.Generic;
using System.Text;
using Tensorflow.Gradients;
using static Tensorflow.Binding;
using static Tensorflow.tensorflow;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        void TapeSetRecordBackprop(string op_type,
            Tensor[] input_tensors,
            TapeTensor[] output_tensors,
            long[] input_ids,
            TF_DataType[] input_dtypes,
            Func<BackwardFunction> backward_function_getter)
        {
            if (!CouldBackprop())
            {
                return;
            }

            foreach (var tape in tf.GetTapeSet())
            {
                tape.RecordOperation(op_type, input_tensors, output_tensors,
                    input_ids, input_dtypes,
                    backward_function_getter);
            }
        }
    }
}
@@ -0,0 +1,26 @@
using System;
using System.Collections.Generic;
using System.Text;
using Tensorflow.Gradients;
using static Tensorflow.tensorflow;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        bool TapeSetRecordForwardprop(string op_type,
            Tensor[] input_tensors,
            TapeTensor[] output_tensors,
            long[] input_ids,
            TF_DataType[] input_dtypes,
            Func<BackwardFunction> backward_function_getter)
        {
            if (!CouldForwardprop())
            {
                return true;
            }

            throw new NotImplementedException("");
        }
    }
}
@@ -0,0 +1,34 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Gradients;
using static Tensorflow.tensorflow;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
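        // Records one op on all active tapes in three steps: wrap the outputs as
        // TapeTensors, give forward accumulators a chance to record (a no-op while
        // forwardprop is unimplemented), then record the backprop entry on each tape.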
        bool TapeSetRecordOperation(string op_type,
            Tensor[] input_tensors,
            Tensor[] output_tensors,
            long[] input_ids,
            TF_DataType[] input_dtypes,
            Func<BackwardFunction> backward_function_getter)
        {
            var output_info = new List<TapeTensor>();

            if (!TapeTensorsFromTensorSequence(output_tensors, output_info))
                return false;

            if (!TapeSetRecordForwardprop(op_type, input_tensors, output_info.ToArray(),
                input_ids, input_dtypes, backward_function_getter))
                return false;

            TapeSetRecordBackprop(op_type, input_tensors, output_info.ToArray(),
                input_ids, input_dtypes, backward_function_getter);

            return true;
        }
    }
}
@@ -0,0 +1,16 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Gradients;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        TapeTensor TapeTensorFromTensor(Tensor tensor)
        {
            return new TapeTensor(tensor.Id, tensor.dtype, tensor.shape);
        }
    }
}
@@ -0,0 +1,21 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Gradients;

namespace Tensorflow.Eager
{
    public partial class EagerRunner
    {
        bool TapeTensorsFromTensorSequence(Tensor[] output_seq,
            List<TapeTensor> output_info)
        {
            for (var i = 0; i < output_seq.Length; ++i)
            {
                output_info.Add(TapeTensorFromTensor(output_seq[i]));
            }
            return true;
        }
    }
}
@@ -2,24 +2,15 @@
 using System.Collections.Generic;
 using System.Text;
 using Tensorflow.Gradients;
 using static Tensorflow.Binding;
 namespace Tensorflow.Eager
 {
-    public class EagerRunner : IEagerRunner
+    /// <summary>
+    /// Eager mode runner
+    /// </summary>
+    public partial class EagerRunner : IEagerRunner
     {
-        public Tensor[] TFE_Execute(Context ctx, string device_name, string op_name, Tensor[] inputs, object[] attrs, int num_outputs)
-        {
-            throw new NotImplementedException();
-        }
-        public Tensor[] TFE_FastPathExecute(Context ctx, string device_name, string opName, string name, Action callbacks, params object[] args)
-        {
-            throw new NotImplementedException();
-        }
-        public Tensor[] TFE_TapeGradient(ITape tape, Tensor[] target, Tensor[] sources, Tensor[] output_gradients)
-        {
-            throw new NotImplementedException();
-        }
     }
 }