@@ -26,11 +26,11 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr
 ### How to use

-| TensorFlow  | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.2 |
-| ----------- | ------- | ------- | ------- | ------ |
-| tf.net 0.20 |         |         |    x    |   x    |
-| tf.net 0.15 |         |    x    |    x    |        |
-| tf.net 0.14 |    x    |    x    |         |        |
+| TensorFlow  | tf native 1.14 | tf native 1.15 | tf native 2.3 |
+| ----------- | -------------- | -------------- | ------------- |
+| tf.net 0.20 |                |       x        |       x       |
+| tf.net 0.15 |       x        |       x        |               |
+| tf.net 0.14 |       x        |                |               |

 Install TF.NET and TensorFlow binary through NuGet.
 ```sh
@@ -138,6 +138,10 @@ Scan QR code to join Tencent TIM group:

 ![SciSharp STACK](https://github.com/SciSharp/SciSharp/blob/master/art/tim.gif)

+WeChat Sponsor (微信打赏):
+
+![SciSharp STACK](https://github.com/SciSharp/SciSharp/blob/master/art/WeChatCollection.jpg)
+
 TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
 <br>
 <a href="http://scisharpstack.org"><img src="https://github.com/SciSharp/SciSharp/blob/master/art/scisharp-stack.png" width="391" height="100" /></a>
| @@ -10,7 +10,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public void WarmUp() | public void WarmUp() | ||||
| { | { | ||||
| print(tf.VERSION); | |||||
| print($"tensorflow native version: v{tf.VERSION}"); | |||||
| } | } | ||||
| public void Execute(int epoch, int iterate, Action<int> process) | public void Execute(int epoch, int iterate, Action<int> process) | ||||
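For context, a hypothetical driver for the benchmark hunk above; the enclosing class name is not visible in this hunk, so `MemoryMonitor` is only an illustrative stand-in:

```csharp
// Illustrative usage of the WarmUp/Execute pair shown above (class name assumed).
var monitor = new MemoryMonitor();
monitor.WarmUp();   // now prints "tensorflow native version: v..." instead of the bare version string
monitor.Execute(epoch: 10, iterate: 100, process: iteration =>
{
    // workload to be measured once per iteration
});
```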
| @@ -8,7 +8,7 @@ | |||||
| </PropertyGroup> | </PropertyGroup> | ||||
| <ItemGroup> | <ItemGroup> | ||||
| <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.2.0.2" /> | |||||
| <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" /> | |||||
| </ItemGroup> | </ItemGroup> | ||||
| <ItemGroup> | <ItemGroup> | ||||
| @@ -1,64 +0,0 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Linq; | |||||
| using Tensorflow.Keras.Layers; | |||||
| namespace Tensorflow | |||||
| { | |||||
| public static partial class keras | |||||
| { | |||||
| public static class layers | |||||
| { | |||||
| public static Embedding Embedding(int input_dim, int output_dim, | |||||
| IInitializer embeddings_initializer = null, | |||||
| bool mask_zero = false) => new Embedding(input_dim, output_dim, | |||||
| embeddings_initializer, | |||||
| mask_zero); | |||||
| public static Tensor[] Input(int[] batch_shape = null, | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||||
| string name = null, | |||||
| bool sparse = false, | |||||
| Tensor tensor = null) | |||||
| { | |||||
| var batch_size = batch_shape[0]; | |||||
| var shape = batch_shape.Skip(1).ToArray(); | |||||
| InputLayer input_layer = null; | |||||
| if (batch_shape != null) | |||||
| input_layer = new InputLayer( | |||||
| batch_input_shape: batch_shape, | |||||
| name: name, | |||||
| dtype: dtype, | |||||
| sparse: sparse, | |||||
| input_tensor: tensor); | |||||
| else | |||||
| input_layer = new InputLayer( | |||||
| input_shape: shape, | |||||
| batch_size: batch_size, | |||||
| name: name, | |||||
| dtype: dtype, | |||||
| sparse: sparse, | |||||
| input_tensor: tensor); | |||||
| var outputs = input_layer.inbound_nodes[0].output_tensors; | |||||
| return outputs; | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -26,5 +26,8 @@ namespace Tensorflow | |||||
| { | { | ||||
| public CompatV1Api v1 { get; } = new CompatV1Api(); | public CompatV1Api v1 { get; } = new CompatV1Api(); | ||||
| } | } | ||||
| public bool executing_eagerly() | |||||
| => Context.executing_eagerly(); | |||||
| } | } | ||||
| } | } | ||||
| @@ -16,7 +16,7 @@ | |||||
| using System; | using System; | ||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using Tensorflow.Eager; | |||||
| using Tensorflow.Contexts; | |||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| namespace Tensorflow | namespace Tensorflow | ||||
| @@ -24,9 +24,7 @@ namespace Tensorflow | |||||
| public class CompatV1Api | public class CompatV1Api | ||||
| { | { | ||||
| public void disable_eager_execution() | public void disable_eager_execution() | ||||
| { | |||||
| tf.context.default_execution_mode = Context.GRAPH_MODE; | |||||
| } | |||||
| => tf.Context.graph_mode(); | |||||
| public IVariableV1 get_variable(string name, | public IVariableV1 get_variable(string name, | ||||
| TensorShape shape = null, | TensorShape shape = null, | ||||
| @@ -18,6 +18,7 @@ using System; | |||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using NumSharp; | using NumSharp; | ||||
| using Tensorflow.Keras; | |||||
| using Tensorflow.Keras.ArgsDefinition; | using Tensorflow.Keras.ArgsDefinition; | ||||
| using Tensorflow.Keras.Layers; | using Tensorflow.Keras.Layers; | ||||
| using Tensorflow.Operations.Activation; | using Tensorflow.Operations.Activation; | ||||
| @@ -164,7 +165,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public Tensor dense(Tensor inputs, | public Tensor dense(Tensor inputs, | ||||
| int units, | int units, | ||||
| IActivation activation = null, | |||||
| Activation activation = null, | |||||
| bool use_bias = true, | bool use_bias = true, | ||||
| IInitializer kernel_initializer = null, | IInitializer kernel_initializer = null, | ||||
| IInitializer bias_initializer = null, | IInitializer bias_initializer = null, | ||||
| @@ -0,0 +1,90 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System; | |||||
| using Tensorflow.Eager; | |||||
| namespace Tensorflow.Contexts | |||||
| { | |||||
| /// <summary> | |||||
| /// Environment in which eager operations execute. | |||||
| /// </summary> | |||||
| public sealed class Context : IDisposable | |||||
| { | |||||
| public const int GRAPH_MODE = 0; | |||||
| public const int EAGER_MODE = 1; | |||||
| int defaultExecutionMode = EAGER_MODE; | |||||
| public string DeviceName { get; set; } = ""; | |||||
| public string ScopeName { get; set; } = ""; | |||||
| bool initialized = false; | |||||
| bool isEager; | |||||
| ContextSwitchStack contextSwitches; | |||||
| public SafeContextHandle Handle { get; } | |||||
| public Context(ContextOptions opts, Status status) | |||||
| { | |||||
| Handle = c_api.TFE_NewContext(opts.Handle, status.Handle); | |||||
| status.Check(true); | |||||
| isEager = defaultExecutionMode == EAGER_MODE; | |||||
| contextSwitches = new ContextSwitchStack(isEager); | |||||
| initialized = true; | |||||
| } | |||||
| /// <summary> | |||||
| /// Initialize handle and devices if not already done so. | |||||
| /// </summary> | |||||
| public void ensure_initialized() | |||||
| { | |||||
| if (initialized) | |||||
| return; | |||||
| initialized = true; | |||||
| } | |||||
| public void start_step() | |||||
| => c_api.TFE_ContextStartStep(Handle); | |||||
| public void end_step() | |||||
| => c_api.TFE_ContextEndStep(Handle); | |||||
| /// <summary> | |||||
| /// Checks whether the current thread has eager execution enabled. | |||||
| /// </summary> | |||||
| /// <returns></returns> | |||||
| public bool executing_eagerly() | |||||
| => isEager; | |||||
| public string shared_name(string name = null) | |||||
| => !string.IsNullOrEmpty(name) || !executing_eagerly() ? | |||||
| name : | |||||
| "cd2c89b7-88b7-44c8-ad83-06c2a9158347"; | |||||
| public void graph_mode() | |||||
| => mode(false); | |||||
| public void eager_mode() | |||||
| => mode(true); | |||||
| void mode(bool mode) | |||||
| { | |||||
| isEager = mode; | |||||
| } | |||||
| public void Dispose() | |||||
| => Handle.Dispose(); | |||||
| } | |||||
| } | |||||
| @@ -15,8 +15,9 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using Tensorflow.Eager; | |||||
| namespace Tensorflow.Eager | |||||
| namespace Tensorflow.Contexts | |||||
| { | { | ||||
| public sealed class ContextOptions : IDisposable | public sealed class ContextOptions : IDisposable | ||||
| { | { | ||||
| @@ -14,15 +14,24 @@ | |||||
| limitations under the License. | limitations under the License. | ||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using Tensorflow.Keras; | |||||
| using Tensorflow.Keras.Engine; | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| namespace Tensorflow | |||||
| namespace Tensorflow.Contexts | |||||
| { | { | ||||
| public static partial class keras | |||||
| public class ContextSwitch | |||||
| { | { | ||||
| public static Preprocessing preprocessing => new Preprocessing(); | |||||
| public static Sequence sequence = new Sequence(); | |||||
| public static Sequential Sequential() => new Sequential(); | |||||
| /// <summary> | |||||
| /// Whether the context is building a function. | |||||
| /// </summary> | |||||
| public bool IsBuildingFunction { get; set; } | |||||
| /// <summary> | |||||
| /// A callable that executes the context switch. | |||||
| /// </summary> | |||||
| public Action EnterContextFn { get; set; } | |||||
| public string DeviceStack { get; set; } | |||||
| } | } | ||||
| } | } | ||||
| @@ -0,0 +1,40 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| namespace Tensorflow.Contexts | |||||
| { | |||||
| /// <summary> | |||||
| /// Match the semantics of DefaultGraphStack | |||||
| /// </summary> | |||||
| public class ContextSwitchStack | |||||
| { | |||||
| Stack<ContextSwitch> stack; | |||||
| public ContextSwitchStack(bool isEager) | |||||
| { | |||||
| stack = new Stack<ContextSwitch>(); | |||||
| if (isEager) | |||||
| stack.Push(new ContextSwitch | |||||
| { | |||||
| IsBuildingFunction = false | |||||
| }); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -25,7 +25,7 @@ namespace Tensorflow | |||||
| (_seed, _seed2) = random_seed.get_seed_tensor(seed); | (_seed, _seed2) = random_seed.get_seed_tensor(seed); | ||||
| _reshuffle_each_iteration = reshuffle_each_iteration; | _reshuffle_each_iteration = reshuffle_each_iteration; | ||||
| var seed_generator = ops.dummy_seed_generator(); | var seed_generator = ops.dummy_seed_generator(); | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| variant_tensor = ops.shuffle_dataset_v3(input_dataset.variant_tensor, _buffer_size, | variant_tensor = ops.shuffle_dataset_v3(input_dataset.variant_tensor, _buffer_size, | ||||
| _seed, _seed2, seed_generator, | _seed, _seed2, seed_generator, | ||||
| output_types, output_shapes, | output_types, output_shapes, | ||||
| @@ -1,50 +0,0 @@ | |||||
| using System; | |||||
| namespace Tensorflow.Eager | |||||
| { | |||||
| public sealed class Context : IDisposable | |||||
| { | |||||
| public const int GRAPH_MODE = 0; | |||||
| public const int EAGER_MODE = 1; | |||||
| public int default_execution_mode; | |||||
| public string device_name = ""; | |||||
| public string scope_name = ""; | |||||
| bool _initialized = false; | |||||
| public SafeContextHandle Handle { get; } | |||||
| public Context(ContextOptions opts, Status status) | |||||
| { | |||||
| Handle = c_api.TFE_NewContext(opts.Handle, status.Handle); | |||||
| status.Check(true); | |||||
| } | |||||
| /// <summary> | |||||
| /// Initialize handle and devices if not already done so. | |||||
| /// </summary> | |||||
| public void ensure_initialized() | |||||
| { | |||||
| if (_initialized) | |||||
| return; | |||||
| _initialized = true; | |||||
| } | |||||
| public void start_step() | |||||
| => c_api.TFE_ContextStartStep(Handle); | |||||
| public void end_step() | |||||
| => c_api.TFE_ContextEndStep(Handle); | |||||
| public bool executing_eagerly() | |||||
| => default_execution_mode == EAGER_MODE; | |||||
| public string shared_name(string name = null) | |||||
| => !string.IsNullOrEmpty(name) || !executing_eagerly() ? | |||||
| name : | |||||
| "cd2c89b7-88b7-44c8-ad83-06c2a9158347"; | |||||
| public void Dispose() | |||||
| => Handle.Dispose(); | |||||
| } | |||||
| } | |||||
| @@ -53,7 +53,7 @@ namespace Tensorflow.Eager | |||||
| { | { | ||||
| object value = null; | object value = null; | ||||
| byte isList = 0; | byte isList = 0; | ||||
| var attrType = c_api.TFE_OpNameGetAttrType(tf.context.Handle, Name, attr_name, ref isList, tf.status.Handle); | |||||
| var attrType = c_api.TFE_OpNameGetAttrType(tf.Context.Handle, Name, attr_name, ref isList, tf.Status.Handle); | |||||
| switch (attrType) | switch (attrType) | ||||
| { | { | ||||
| case TF_AttrType.TF_ATTR_BOOL: | case TF_AttrType.TF_ATTR_BOOL: | ||||
| @@ -0,0 +1,58 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Collections.Generic; | |||||
| using System; | |||||
| using System.Linq; | |||||
| using static Tensorflow.Binding; | |||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow.Eager | |||||
| { | |||||
| public partial class EagerRunner | |||||
| { | |||||
| public (TF_DataType, Tensor[]) ArgsToMatchingEager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null) | |||||
| { | |||||
| if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid) | |||||
| return (default_dtype, null); | |||||
| if (args.Count(x => x is Tensor) == args.Length) | |||||
| return ((args[0] as Tensor).dtype, args.Select(x => x as Tensor).ToArray()); | |||||
| var dtype = TF_DataType.DtInvalid; | |||||
| foreach (var x in args) | |||||
| { | |||||
| if (x is Tensor et) | |||||
| dtype = et.dtype; | |||||
| } | |||||
| if (dtype == TF_DataType.DtInvalid) | |||||
| { | |||||
| var ret = new List<Tensor>(); | |||||
| foreach (var t in args) | |||||
| { | |||||
| ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as Tensor); | |||||
| if (dtype == TF_DataType.DtInvalid) | |||||
| dtype = ret.Last().dtype; | |||||
| } | |||||
| return (dtype, ret.ToArray()); | |||||
| } | |||||
| else | |||||
| throw new NotImplementedException(""); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -1,11 +1,30 @@ | |||||
| using System.Collections.Generic; | |||||
| using System; | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Linq; | using System.Linq; | ||||
| using System; | |||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow.Eager | namespace Tensorflow.Eager | ||||
| { | { | ||||
| public class Execute | |||||
| /// <summary> | |||||
| /// python\eager\pywrap_tfe_src.cc | |||||
| /// </summary> | |||||
| public partial class EagerRunner | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Execute a TensorFlow operation. | /// Execute a TensorFlow operation. | ||||
| @@ -28,14 +47,14 @@ namespace Tensorflow.Eager | |||||
| /// <param name="ctx">The value of context.context().</param> | /// <param name="ctx">The value of context.context().</param> | ||||
| /// <param name="name">Customized name for the operation.</param> | /// <param name="name">Customized name for the operation.</param> | ||||
| /// <returns>List of output Tensor objects. The list is empty if there are no outputs</returns> | /// <returns>List of output Tensor objects. The list is empty if there are no outputs</returns> | ||||
| public Tensor[] execute(Context ctx, string op_name, int num_outputs, | |||||
| Tensor[] inputs, object[] attrs, | |||||
| public Tensor[] Execute(Context ctx, string op_name, int num_outputs, | |||||
| Tensor[] inputs, object[] attrs, | |||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| ctx.ensure_initialized(); | ctx.ensure_initialized(); | ||||
| var results = tf.Runner.TFE_Execute(ctx, | var results = tf.Runner.TFE_Execute(ctx, | ||||
| ctx.device_name, | |||||
| ctx.DeviceName, | |||||
| op_name, | op_name, | ||||
| inputs, | inputs, | ||||
| attrs, | attrs, | ||||
| @@ -43,36 +62,5 @@ namespace Tensorflow.Eager | |||||
| return results; | return results; | ||||
| } | } | ||||
| public (TF_DataType, Tensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null) | |||||
| { | |||||
| if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid) | |||||
| return (default_dtype, null); | |||||
| if (args.Count(x => x is Tensor) == args.Length) | |||||
| return ((args[0] as Tensor).dtype, args.Select(x => x as Tensor).ToArray()); | |||||
| var dtype = TF_DataType.DtInvalid; | |||||
| foreach (var x in args) | |||||
| { | |||||
| if (x is Tensor et) | |||||
| dtype = et.dtype; | |||||
| } | |||||
| if (dtype == TF_DataType.DtInvalid) | |||||
| { | |||||
| var ret = new List<Tensor>(); | |||||
| foreach (var t in args) | |||||
| { | |||||
| ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as Tensor); | |||||
| if (dtype == TF_DataType.DtInvalid) | |||||
| dtype = ret.Last().dtype; | |||||
| } | |||||
| return (dtype, ret.ToArray()); | |||||
| } | |||||
| else | |||||
| throw new NotImplementedException(""); | |||||
| } | |||||
| } | } | ||||
| } | |||||
| } | |||||
| @@ -1,7 +1,23 @@ | |||||
| using System.Collections.Generic; | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Linq; | using System.Linq; | ||||
| using System; | using System; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow.Eager | namespace Tensorflow.Eager | ||||
| { | { | ||||
| @@ -25,7 +41,7 @@ namespace Tensorflow.Eager | |||||
| object[] attrs, | object[] attrs, | ||||
| int num_outputs) | int num_outputs) | ||||
| { | { | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| var op = GetOp(ctx, op_name, status); | var op = GetOp(ctx, op_name, status); | ||||
| status.Check(true); | status.Check(true); | ||||
| c_api.TFE_OpSetDevice(op, device_name, status.Handle); | c_api.TFE_OpSetDevice(op, device_name, status.Handle); | ||||
| @@ -3,11 +3,9 @@ using System.Linq; | |||||
| using System; | using System; | ||||
| using static Tensorflow.OpDef.Types; | using static Tensorflow.OpDef.Types; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using Google.Protobuf.WellKnownTypes; | |||||
| using System.Threading; | |||||
| using Tensorflow.Util; | using Tensorflow.Util; | ||||
| using System.Runtime.InteropServices.ComTypes; | |||||
| using System.Runtime.InteropServices; | using System.Runtime.InteropServices; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow.Eager | namespace Tensorflow.Eager | ||||
| { | { | ||||
| @@ -17,6 +15,7 @@ namespace Tensorflow.Eager | |||||
| public partial class EagerRunner | public partial class EagerRunner | ||||
| { | { | ||||
| int kFastPathExecuteInputStartIndex = 0; | int kFastPathExecuteInputStartIndex = 0; | ||||
| UnorderedMap<Context, SafeOpHandle> thread_local_eager_operation_map = new UnorderedMap<Context, SafeOpHandle>(); | |||||
| public Tensor[] TFE_FastPathExecute(Context ctx, | public Tensor[] TFE_FastPathExecute(Context ctx, | ||||
| string device_name, | string device_name, | ||||
| @@ -45,7 +44,7 @@ namespace Tensorflow.Eager | |||||
| op_exec_info.run_post_exec_callbacks = callbacks != null; | op_exec_info.run_post_exec_callbacks = callbacks != null; | ||||
| op_exec_info.run_callbacks = op_exec_info.run_gradient_callback || op_exec_info.run_post_exec_callbacks; | op_exec_info.run_callbacks = op_exec_info.run_gradient_callback || op_exec_info.run_post_exec_callbacks; | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| var op = GetOp(ctx, opName, status); | var op = GetOp(ctx, opName, status); | ||||
| var op_def = tf.get_default_graph().GetOpDef(opName); | var op_def = tf.get_default_graph().GetOpDef(opName); | ||||
| @@ -173,7 +172,7 @@ namespace Tensorflow.Eager | |||||
| SafeOpHandle GetOp(Context ctx, string op_or_function_name, Status status) | SafeOpHandle GetOp(Context ctx, string op_or_function_name, Status status) | ||||
| { | { | ||||
| if (thread_local_eager_operation_map.find(ctx, out var op)) | if (thread_local_eager_operation_map.find(ctx, out var op)) | ||||
| c_api.TFE_OpReset(op, op_or_function_name, ctx.device_name, status.Handle); | |||||
| c_api.TFE_OpReset(op, op_or_function_name, ctx.DeviceName, status.Handle); | |||||
| else | else | ||||
| { | { | ||||
| op = c_api.TFE_NewOp(ctx.Handle, op_or_function_name, status.Handle); | op = c_api.TFE_NewOp(ctx.Handle, op_or_function_name, status.Handle); | ||||
| @@ -184,8 +183,6 @@ namespace Tensorflow.Eager | |||||
| return op; | return op; | ||||
| } | } | ||||
| static UnorderedMap<Context, SafeOpHandle> thread_local_eager_operation_map = new UnorderedMap<Context, SafeOpHandle>(); | |||||
| bool HasAccumulator() | bool HasAccumulator() | ||||
| { | { | ||||
| //return !GetAccumulatorSet()->empty(); | //return !GetAccumulatorSet()->empty(); | ||||
| @@ -252,7 +249,7 @@ namespace Tensorflow.Eager | |||||
| public void SetOpAttrs(SafeOpHandle op, params object[] attrs) | public void SetOpAttrs(SafeOpHandle op, params object[] attrs) | ||||
| { | { | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| var len = attrs.Length; | var len = attrs.Length; | ||||
| for (int i = 0; i < len; i += 2) | for (int i = 0; i < len; i += 2) | ||||
| { | { | ||||
| @@ -263,9 +260,9 @@ namespace Tensorflow.Eager | |||||
| var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle); | var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle); | ||||
| if (!status.ok()) return; | if (!status.ok()) return; | ||||
| if (is_list != 0) | if (is_list != 0) | ||||
| SetOpAttrList(tf.context, op, key, value as object[], type, null, status); | |||||
| SetOpAttrList(tf.Context, op, key, value as object[], type, null, status); | |||||
| else | else | ||||
| SetOpAttrScalar(tf.context, op, key, value, type, null, status); | |||||
| SetOpAttrScalar(tf.Context, op, key, value, type, null, status); | |||||
| status.Check(true); | status.Check(true); | ||||
| } | } | ||||
| } | } | ||||
| @@ -11,6 +11,6 @@ namespace Tensorflow.Eager | |||||
| /// </summary> | /// </summary> | ||||
| public partial class EagerRunner : IEagerRunner | public partial class EagerRunner : IEagerRunner | ||||
| { | { | ||||
| } | } | ||||
| } | } | ||||
| @@ -22,25 +22,25 @@ namespace Tensorflow.Eager | |||||
| public EagerTensor(string value, string device_name) : base(value) | public EagerTensor(string value, string device_name) : base(value) | ||||
| { | { | ||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle); | |||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||||
| Resolve(); | Resolve(); | ||||
| } | } | ||||
| public EagerTensor(byte[] value, string device_name, TF_DataType dtype) : base(value, dType: dtype) | public EagerTensor(byte[] value, string device_name, TF_DataType dtype) : base(value, dType: dtype) | ||||
| { | { | ||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle); | |||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||||
| Resolve(); | Resolve(); | ||||
| } | } | ||||
| public EagerTensor(string[] value, string device_name) : base(value) | public EagerTensor(string[] value, string device_name) : base(value) | ||||
| { | { | ||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle); | |||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||||
| Resolve(); | Resolve(); | ||||
| } | } | ||||
| public EagerTensor(NDArray value, string device_name) : base(value) | public EagerTensor(NDArray value, string device_name) : base(value) | ||||
| { | { | ||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle); | |||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||||
| Resolve(); | Resolve(); | ||||
| } | } | ||||
| @@ -49,7 +49,7 @@ namespace Tensorflow.Eager | |||||
| _id = ops.uid(); | _id = ops.uid(); | ||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.status.Handle); | |||||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle); | |||||
| //print($"new Tensor {Id} {_handle.ToString("x16")}"); | //print($"new Tensor {Id} {_handle.ToString("x16")}"); | ||||
| //print($"new TensorHandle {Id} {EagerTensorHandle.ToString("x16")}"); | //print($"new TensorHandle {Id} {EagerTensorHandle.ToString("x16")}"); | ||||
| @@ -13,24 +13,24 @@ namespace Tensorflow.Eager | |||||
| get | get | ||||
| { | { | ||||
| using var _ = EagerTensorHandle.Lease(); | using var _ = EagerTensorHandle.Lease(); | ||||
| return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.status.Handle)); | |||||
| return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.Status.Handle)); | |||||
| } | } | ||||
| } | } | ||||
| public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.status.Handle); | |||||
| public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.Status.Handle); | |||||
| public static int GetRank(IntPtr handle) | public static int GetRank(IntPtr handle) | ||||
| { | { | ||||
| var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | ||||
| return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.status.Handle); | |||||
| return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.Status.Handle); | |||||
| } | } | ||||
| public static int[] GetDims(IntPtr handle) | public static int[] GetDims(IntPtr handle) | ||||
| { | { | ||||
| var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); | ||||
| var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.status.Handle)]; | |||||
| var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.Status.Handle)]; | |||||
| for (int i = 0; i < dims.Length; i++) | for (int i = 0; i < dims.Length; i++) | ||||
| dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, tf.status.Handle); | |||||
| dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, tf.Status.Handle); | |||||
| return dims; | return dims; | ||||
| } | } | ||||
| } | } | ||||
| @@ -1,6 +1,7 @@ | |||||
| using System; | using System; | ||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Text; | using System.Text; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow.Eager | namespace Tensorflow.Eager | ||||
| { | { | ||||
| @@ -1,27 +1,37 @@ | |||||
| using System; | using System; | ||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Text; | using System.Text; | ||||
| using Tensorflow.Contexts; | |||||
| using Tensorflow.Gradients; | using Tensorflow.Gradients; | ||||
| namespace Tensorflow.Eager | namespace Tensorflow.Eager | ||||
| { | { | ||||
| public interface IEagerRunner | public interface IEagerRunner | ||||
| { | { | ||||
| public Tensor[] TFE_FastPathExecute(Context ctx, | |||||
| Tensor[] Execute(Context ctx, string op_name, | |||||
| int num_outputs, | |||||
| Tensor[] inputs, object[] attrs, | |||||
| string name = null); | |||||
| (TF_DataType, Tensor[]) ArgsToMatchingEager(Context ctx, | |||||
| TF_DataType default_dtype = TF_DataType.DtInvalid, | |||||
| object[] args = null); | |||||
| Tensor[] TFE_FastPathExecute(Context ctx, | |||||
| string device_name, | string device_name, | ||||
| string opName, | string opName, | ||||
| string name, | string name, | ||||
| Action callbacks, | Action callbacks, | ||||
| params object[] args); | params object[] args); | ||||
| public Tensor[] TFE_Execute(Context ctx, | |||||
| Tensor[] TFE_Execute(Context ctx, | |||||
| string device_name, | string device_name, | ||||
| string op_name, | string op_name, | ||||
| Tensor[] inputs, | Tensor[] inputs, | ||||
| object[] attrs, | object[] attrs, | ||||
| int num_outputs); | int num_outputs); | ||||
| public Tensor[] TFE_TapeGradient(ITape tape, | |||||
| Tensor[] TFE_TapeGradient(ITape tape, | |||||
| Tensor[] target, | Tensor[] target, | ||||
| Tensor[] sources, | Tensor[] sources, | ||||
| Tensor[] output_gradients); | Tensor[] output_gradients); | ||||
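With `Execute` and `ArgsToMatchingEager` promoted onto `IEagerRunner`, op dispatch goes through `tf.Runner`; a hedged sketch (the op name and attribute layout are illustrative, and `x`/`y` are assumed existing tensors):

```csharp
Tensor[] results = tf.Runner.TFE_Execute(
    tf.Context,
    tf.Context.DeviceName,
    "Add",
    new[] { x, y },
    new object[] { "T", x.dtype },
    num_outputs: 1);
```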
| @@ -18,9 +18,9 @@ namespace Tensorflow.Framework.Models | |||||
| protected string _name; | protected string _name; | ||||
| public string name => _name; | public string name => _name; | ||||
| public DenseSpec(int[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||||
| public DenseSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||||
| { | { | ||||
| _shape = new TensorShape(shape); | |||||
| _shape = shape; | |||||
| _dtype = dtype; | _dtype = dtype; | ||||
| _name = name; | _name = name; | ||||
| } | } | ||||
| @@ -7,7 +7,7 @@ namespace Tensorflow.Framework.Models | |||||
| { | { | ||||
| public class TensorSpec : DenseSpec | public class TensorSpec : DenseSpec | ||||
| { | { | ||||
| public TensorSpec(int[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) : | |||||
| public TensorSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) : | |||||
| base(shape, dtype, name) | base(shape, dtype, name) | ||||
| { | { | ||||
| @@ -37,14 +37,14 @@ namespace Tensorflow.Gradients | |||||
| { | { | ||||
| _persistent = persistent; | _persistent = persistent; | ||||
| _watch_accessed_variables = watch_accessed_variables; | _watch_accessed_variables = watch_accessed_variables; | ||||
| _created_eagerly = tf.context.executing_eagerly(); | |||||
| _created_eagerly = tf.Context.executing_eagerly(); | |||||
| _recording = false; | _recording = false; | ||||
| _created_eagerly = tf.context.executing_eagerly(); | |||||
| _created_eagerly = tf.Context.executing_eagerly(); | |||||
| // Enters a context inside which operations are recorded on this tape. | // Enters a context inside which operations are recorded on this tape. | ||||
| if (_created_eagerly) | if (_created_eagerly) | ||||
| { | { | ||||
| tf.context.ensure_initialized(); | |||||
| tf.context.start_step(); | |||||
| tf.Context.ensure_initialized(); | |||||
| tf.Context.start_step(); | |||||
| } | } | ||||
| _push_tape(); | _push_tape(); | ||||
| } | } | ||||
| @@ -156,7 +156,7 @@ namespace Tensorflow.Gradients | |||||
| _pop_tape(); | _pop_tape(); | ||||
| if (_created_eagerly) | if (_created_eagerly) | ||||
| tf.context.end_step(); | |||||
| tf.Context.end_step(); | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -515,7 +515,7 @@ namespace Tensorflow.Gradients | |||||
| var rank = input_0_shape.Length; | var rank = input_0_shape.Length; | ||||
| if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>())) | if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>())) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| // should add ones_rank_cache | // should add ones_rank_cache | ||||
| var new_shape = constant_op.constant(range(0, rank).Select(x => 1).ToArray(), dtype: TF_DataType.TF_INT32); | var new_shape = constant_op.constant(range(0, rank).Select(x => 1).ToArray(), dtype: TF_DataType.TF_INT32); | ||||
| @@ -534,7 +534,7 @@ namespace Tensorflow.Gradients | |||||
| input_shape = array_ops.shape(op.inputs[0]); | input_shape = array_ops.shape(op.inputs[0]); | ||||
| return new Tensor[] { gen_array_ops.tile(grad, input_shape), null }; | return new Tensor[] { gen_array_ops.tile(grad, input_shape), null }; | ||||
| } | } | ||||
| else if (!input_0_shape.Contains(-1) && !tf.context.executing_eagerly()) | |||||
| else if (!input_0_shape.Contains(-1) && !tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException(""); | throw new NotImplementedException(""); | ||||
| } | } | ||||
| @@ -82,7 +82,7 @@ namespace Tensorflow | |||||
| /// </summary> | /// </summary> | ||||
| public _ControlDependenciesController control_dependencies(object[] control_inputs) | public _ControlDependenciesController control_dependencies(object[] control_inputs) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| return new _ControlDependenciesController(this, null); | return new _ControlDependenciesController(this, null); | ||||
| if (control_inputs == null) | if (control_inputs == null) | ||||
| @@ -518,7 +518,7 @@ namespace Tensorflow | |||||
| public TensorShape GetTensorShape(TF_Output output) | public TensorShape GetTensorShape(TF_Output output) | ||||
| { | { | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status.Handle); | var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status.Handle); | ||||
| status.Check(); | status.Check(); | ||||
| @@ -0,0 +1,17 @@ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| using static Tensorflow.Binding; | |||||
| namespace Tensorflow.Keras | |||||
| { | |||||
| public delegate Tensor Activation(Tensor x); | |||||
| public class Activations | |||||
| { | |||||
| /// <summary> | |||||
| /// Linear activation function (pass-through). | |||||
| /// </summary> | |||||
| public Activation Linear = x => x; | |||||
| } | |||||
| } | |||||
| @@ -16,7 +16,7 @@ namespace Tensorflow.Keras.ArgsDefinition | |||||
| /// <summary> | /// <summary> | ||||
| /// Activation function to use. | /// Activation function to use. | ||||
| /// </summary> | /// </summary> | ||||
| public IActivation Activation { get; set; } | |||||
| public Activation Activation { get; set; } | |||||
| /// <summary> | /// <summary> | ||||
| /// Whether the layer uses a bias vector. | /// Whether the layer uses a bias vector. | ||||
| @@ -0,0 +1,13 @@ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| namespace Tensorflow.Keras.ArgsDefinition | |||||
| { | |||||
| public class InputLayerArgs : LayerArgs | |||||
| { | |||||
| public Tensor InputTensor { get; set; } | |||||
| public bool Sparse { get; set; } | |||||
| public bool Ragged { get; set; } | |||||
| } | |||||
| } | |||||
| @@ -17,7 +17,7 @@ namespace Tensorflow.Keras.ArgsDefinition | |||||
| /// <summary> | /// <summary> | ||||
| /// Only applicable to input layers. | /// Only applicable to input layers. | ||||
| /// </summary> | /// </summary> | ||||
| public TF_DataType DType { get; set; } | |||||
| public TF_DataType DType { get; set; } = TF_DataType.TF_FLOAT; | |||||
| /// <summary> | /// <summary> | ||||
| /// Whether the `call` method can be used to build a TF graph without issues. | /// Whether the `call` method can be used to build a TF graph without issues. | ||||
| @@ -36,6 +36,8 @@ namespace Tensorflow.Keras.ArgsDefinition | |||||
| /// </summary> | /// </summary> | ||||
| public TensorShape BatchInputShape { get; set; } | public TensorShape BatchInputShape { get; set; } | ||||
| public int BatchSize { get; set; } = -1; | |||||
| /// <summary> | /// <summary> | ||||
| /// Initial weight values. | /// Initial weight values. | ||||
| /// </summary> | /// </summary> | ||||
| @@ -0,0 +1,17 @@ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| using Tensorflow.Keras.Engine; | |||||
| using Tensorflow.Keras.Layers; | |||||
| namespace Tensorflow.Keras.ArgsDefinition | |||||
| { | |||||
| public class NodeArgs | |||||
| { | |||||
| public Layer[] InboundLayers { get; set; } | |||||
| public int[] NodeIndices { get; set; } | |||||
| public int[] TensorIndices { get; set; } | |||||
| public Tensor[] InputTensors { get; set; } | |||||
| public Tensor[] Outputs { get; set; } | |||||
| } | |||||
| } | |||||
| @@ -20,20 +20,21 @@ using static Tensorflow.Binding; | |||||
| namespace Tensorflow.Keras | namespace Tensorflow.Keras | ||||
| { | { | ||||
| public class backend : BackendBase | |||||
| public class BackendImpl : BackendBase | |||||
| { | { | ||||
| /* ---------------------------------------- KERAS BACKEND NATIVE OBJECTS ---------------------------------------- */ | /* ---------------------------------------- KERAS BACKEND NATIVE OBJECTS ---------------------------------------- */ | ||||
| public static Func<Array, double> py_sum = sum; | |||||
| public static Func<Array, bool> py_all = all; | |||||
| public Func<Array, double> py_sum = sum; | |||||
| public Func<Array, bool> py_all = all; | |||||
| //Func<Array, bool> py_any = any; | //Func<Array, bool> py_any = any; | ||||
| //Func<double, double, double, IEnumerable<double>> py_slice = slice; | //Func<double, double, double, IEnumerable<double>> py_slice = slice; | ||||
| public static Session _SESSION = ops.get_default_session(); | |||||
| public static Graph _GRAPH = null; | |||||
| public static Dictionary<Graph, GraphLearningPhase> _GRAPH_LEARNING_PHASES; | |||||
| public Session _SESSION => ops.get_default_session(); | |||||
| public Graph _GRAPH; | |||||
| public Dictionary<Graph, GraphLearningPhase> _GRAPH_LEARNING_PHASES; | |||||
| //Dictionary<Graph, Dictionary<string, int>> PER_GRAPH_LAYER_NAME_UIDS; | //Dictionary<Graph, Dictionary<string, int>> PER_GRAPH_LAYER_NAME_UIDS; | ||||
| public static bool _MANUAL_VAR_INIT = false; | |||||
| public static List<string> _LOCAL_DEVICES = null; | |||||
| public bool _MANUAL_VAR_INIT = false; | |||||
| public List<string> _LOCAL_DEVICES = null; | |||||
| /* -------------------------------------- KERAS BACKEND NATIVE OBJECTS END -------------------------------------- */ | /* -------------------------------------- KERAS BACKEND NATIVE OBJECTS END -------------------------------------- */ | ||||
| /// <summary> | /// <summary> | ||||
| @@ -41,23 +42,28 @@ namespace Tensorflow.Keras | |||||
| /// for various layer names in each graph. | /// for various layer names in each graph. | ||||
| /// Allows to give unique autogenerated names to layers, in a graph-specific way. | /// Allows to give unique autogenerated names to layers, in a graph-specific way. | ||||
| /// </summary> | /// </summary> | ||||
| public static Dictionary<Graph, Dictionary<(string, string), int>> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); | |||||
| public static Dictionary<string, IVariableV1> _GRAPH_VARIABLES = new Dictionary<string, IVariableV1>(); | |||||
| public static Dictionary<string, Optimizer> _GRAPH_TF_OPTIMIZERS = new Dictionary<string, Optimizer>(); | |||||
| public Dictionary<Graph, Dictionary<(string, string), int>> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); | |||||
| public Dictionary<string, IVariableV1> _GRAPH_VARIABLES = new Dictionary<string, IVariableV1>(); | |||||
| public Dictionary<string, Optimizer> _GRAPH_TF_OPTIMIZERS = new Dictionary<string, Optimizer>(); | |||||
| public _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph(); | |||||
| public static _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph(); | |||||
| public BackendImpl() | |||||
| { | |||||
| } | |||||
| public static void track_variable(IVariableV1 v) | |||||
| public void track_variable(IVariableV1 v) | |||||
| { | { | ||||
| var graph = v.Graph; | var graph = v.Graph; | ||||
| _GRAPH_VARIABLES[graph.graph_key] = v; | _GRAPH_VARIABLES[graph.graph_key] = v; | ||||
| } | } | ||||
| public static Tensor placeholder(int[] shape = null, | |||||
| public Tensor placeholder(TensorShape shape = null, | |||||
| int ndim = -1, | int ndim = -1, | ||||
| TF_DataType dtype = TF_DataType.DtInvalid, | TF_DataType dtype = TF_DataType.DtInvalid, | ||||
| bool sparse = false, | bool sparse = false, | ||||
| string name = null) | |||||
| string name = null, | |||||
| bool ragged = false) | |||||
| { | { | ||||
| if (sparse) | if (sparse) | ||||
| { | { | ||||
| @@ -65,16 +71,16 @@ namespace Tensorflow.Keras | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| return gen_array_ops.placeholder(dtype: dtype, shape: new TensorShape(shape), name: name); | |||||
| return array_ops.placeholder(dtype: dtype, shape: shape, name: name); | |||||
| } | } | ||||
| } | } | ||||
| public static Graph get_graph() | |||||
| public Graph get_graph() | |||||
| { | { | ||||
| return ops.get_default_graph(); | return ops.get_default_graph(); | ||||
| } | } | ||||
| public static int get_uid(string prefix, string @namespace = "") | |||||
| public int get_uid(string prefix, string @namespace = "") | |||||
| { | { | ||||
| var graph = tf.get_default_graph(); | var graph = tf.get_default_graph(); | ||||
| if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | ||||
| @@ -83,7 +89,7 @@ namespace Tensorflow.Keras | |||||
| return PER_GRAPH_LAYER_NAME_UIDS[graph][(@namespace, prefix)]; | return PER_GRAPH_LAYER_NAME_UIDS[graph][(@namespace, prefix)]; | ||||
| } | } | ||||
| public static int get_uid((string, string) name) | |||||
| public int get_uid((string, string) name) | |||||
| { | { | ||||
| var graph = tf.get_default_graph(); | var graph = tf.get_default_graph(); | ||||
| if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | ||||
| @@ -92,21 +98,21 @@ namespace Tensorflow.Keras | |||||
| return PER_GRAPH_LAYER_NAME_UIDS[graph][name]; | return PER_GRAPH_LAYER_NAME_UIDS[graph][name]; | ||||
| } | } | ||||
| public static void reset_uids() => PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); | |||||
| public static void clear_session() | |||||
| public void reset_uids() => PER_GRAPH_LAYER_NAME_UIDS = new Dictionary<Graph, Dictionary<(string, string), int>>(); | |||||
| public void clear_session() | |||||
| { | { | ||||
| ops.reset_default_graph(); | ops.reset_default_graph(); | ||||
| reset_uids(); | reset_uids(); | ||||
| _SESSION = null; | |||||
| ops.set_default_session(tf.Session(ops.get_default_graph())); | |||||
| var phase = tf.placeholder_with_default(false, new int[] { }, name: "keras_learning_phase"); | var phase = tf.placeholder_with_default(false, new int[] { }, name: "keras_learning_phase"); | ||||
| _GRAPH_LEARNING_PHASES = new Dictionary<Graph, GraphLearningPhase>(); | _GRAPH_LEARNING_PHASES = new Dictionary<Graph, GraphLearningPhase>(); | ||||
| _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = 0; | _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = 0; | ||||
| } | } | ||||
| public static void manual_variable_initialization(bool value) | |||||
| public void manual_variable_initialization(bool value) | |||||
| { | { | ||||
| _MANUAL_VAR_INIT = value; | _MANUAL_VAR_INIT = value; | ||||
| } | } | ||||
| public static GraphLearningPhase learning_phase() | |||||
| public GraphLearningPhase learning_phase() | |||||
| { | { | ||||
| var graph = tf.get_default_graph(); | var graph = tf.get_default_graph(); | ||||
| if (_GRAPH_LEARNING_PHASES.ContainsKey(graph)) | if (_GRAPH_LEARNING_PHASES.ContainsKey(graph)) | ||||
| @@ -116,7 +122,7 @@ namespace Tensorflow.Keras | |||||
| } | } | ||||
| return _GRAPH_LEARNING_PHASES[graph]; | return _GRAPH_LEARNING_PHASES[graph]; | ||||
| } | } | ||||
| public static void set_learning_phase(bool value) | |||||
| public void set_learning_phase(bool value) | |||||
| { | { | ||||
| _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = (GraphLearningPhase)((value) ? 1 : 0); | _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = (GraphLearningPhase)((value) ? 1 : 0); | ||||
| } | } | ||||
| @@ -1,15 +0,0 @@ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| namespace Tensorflow.Keras.Engine | |||||
| { | |||||
| /// <summary> | |||||
| /// A layer is a callable object that takes as input one or more tensors and | |||||
| /// that outputs one or more tensors. | |||||
| /// </summary> | |||||
| public interface ILayer | |||||
| { | |||||
| Tensor Apply(Tensor inputs, bool is_training = false); | |||||
| } | |||||
| } | |||||
| @@ -31,20 +31,22 @@ namespace Tensorflow.Keras.Engine | |||||
| /// A layer is a class implementing common neural networks operations, such | /// A layer is a class implementing common neural networks operations, such | ||||
| /// as convolution, batch norm, etc. These operations require managing weights, | /// as convolution, batch norm, etc. These operations require managing weights, | ||||
| /// losses, updates, and inter-layer connectivity. | /// losses, updates, and inter-layer connectivity. | ||||
| /// | |||||
| /// tensorflow\python\keras\engine\base_layer.py | |||||
| /// </summary> | /// </summary> | ||||
| public class Layer : AutoTrackable, ILayer | |||||
| public abstract class Layer : AutoTrackable | |||||
| { | { | ||||
| protected LayerArgs _args; | |||||
| /// <summary> | |||||
| /// Arguments initialize layer. | |||||
| /// </summary> | |||||
| LayerArgs args; | |||||
| /// <summary> | /// <summary> | ||||
| /// Indicates whether `build` needs to be called upon layer call, to create | /// Indicates whether `build` needs to be called upon layer call, to create | ||||
| /// the layer's weights. | /// the layer's weights. | ||||
| /// </summary> | /// </summary> | ||||
| protected bool built; | protected bool built; | ||||
| protected bool trainable; | |||||
| public TF_DataType _dtype; | |||||
| public bool Trainable => args.Trainable; | |||||
| public TF_DataType DType => args.DType; | |||||
| /// <summary> | /// <summary> | ||||
| /// A stateful layer is a layer whose updates are run during inference too, | /// A stateful layer is a layer whose updates are run during inference too, | ||||
| /// for instance stateful RNNs. | /// for instance stateful RNNs. | ||||
| @@ -53,53 +55,53 @@ namespace Tensorflow.Keras.Engine | |||||
| /// <summary> | /// <summary> | ||||
| /// Provides information about which inputs are compatible with the layer. | /// Provides information about which inputs are compatible with the layer. | ||||
| /// </summary> | /// </summary> | ||||
| protected InputSpec input_spec; | |||||
| protected bool supports_masking; | |||||
| protected List<IVariableV1> _trainable_weights; | |||||
| public List<IVariableV1> trainable_variables => _trainable_weights; | |||||
| protected List<IVariableV1> _non_trainable_weights; | |||||
| private string _name; | |||||
| public string name => _name; | |||||
| protected string _base_name; | |||||
| protected bool _compute_previous_mask; | |||||
| protected List<Operation> _updates; | |||||
| public int[] _batch_input_shape; | |||||
| private List<Node> _inbound_nodes; | |||||
| public List<Node> inbound_nodes => _inbound_nodes; | |||||
| #pragma warning disable CS0649 // Field 'Layer._outbound_nodes' is never assigned to, and will always have its default value null | |||||
| private List<Node> _outbound_nodes; | |||||
| #pragma warning restore CS0649 // Field 'Layer._outbound_nodes' is never assigned to, and will always have its default value null | |||||
| public List<Node> outbound_nodes => _outbound_nodes; | |||||
| #pragma warning disable CS0169 // The field 'Layer._initial_weights' is never used | |||||
| float _initial_weights; | |||||
| #pragma warning restore CS0169 // The field 'Layer._initial_weights' is never used | |||||
| ThreadLocal<CallContext> _call_context; | |||||
| public CallContext CallContext => _call_context.Value; | |||||
| protected InputSpec inputSpec; | |||||
| public bool SupportsMasking { get; set; } | |||||
| protected List<IVariableV1> trainableWeights; | |||||
| public List<IVariableV1> TrainableVariables => trainableWeights; | |||||
| protected List<IVariableV1> nonTrainableWeights; | |||||
| string name; | |||||
| public string Name => name; | |||||
| protected string baseName; | |||||
| protected bool computePreviousMask; | |||||
| protected List<Operation> updates; | |||||
| public TensorShape BatchInputShape => args.BatchInputShape; | |||||
| List<Node> inboundNodes; | |||||
| public List<Node> InboundNodes => inboundNodes; | |||||
| List<Node> outboundNodes; | |||||
| public List<Node> OutboundNodes => outboundNodes; | |||||
| ThreadLocal<CallContext> callContext; | |||||
| public CallContext CallContext => callContext.Value; | |||||
| public Layer(LayerArgs args) | public Layer(LayerArgs args) | ||||
| { | { | ||||
| _args = args; | |||||
| this.args = args; | |||||
| // A stateful layer is a layer whose updates are run during inference too, | // A stateful layer is a layer whose updates are run during inference too, | ||||
| // for instance stateful RNNs. | // for instance stateful RNNs. | ||||
| stateful = false; | stateful = false; | ||||
| // Indicates whether `build` needs to be called upon layer call, to create | // Indicates whether `build` needs to be called upon layer call, to create | ||||
| // the layer's weights. | // the layer's weights. | ||||
| built = false; | built = false; | ||||
| this.supports_masking = false; | |||||
| this.SupportsMasking = false; | |||||
| _init_set_name(name); | _init_set_name(name); | ||||
| _trainable_weights = new List<IVariableV1>(); | |||||
| _non_trainable_weights = new List<IVariableV1>(); | |||||
| _compute_previous_mask = false; | |||||
| _updates = new List<Operation>(); | |||||
| trainableWeights = new List<IVariableV1>(); | |||||
| nonTrainableWeights = new List<IVariableV1>(); | |||||
| computePreviousMask = false; | |||||
| updates = new List<Operation>(); | |||||
| inboundNodes = new List<Node>(); | |||||
| // Manage input shape information if passed. | // Manage input shape information if passed. | ||||
| _inbound_nodes = new List<Node>(); | |||||
| if(args.BatchInputShape == null && args.InputShape != null) | |||||
| { | |||||
| args.BatchInputShape = new int[] { args.BatchSize }.Concat(args.InputShape.dims).ToArray(); | |||||
| } | |||||
| } | } | ||||
| /// <summary> | /// <summary> | ||||
| @@ -108,39 +110,37 @@ namespace Tensorflow.Keras.Engine | |||||
| /// <param name="input"></param> | /// <param name="input"></param> | ||||
| /// <param name="is_training"></param> | /// <param name="is_training"></param> | ||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public Tensor Apply(Tensor input, bool is_training = false) | |||||
| public Tensor Apply(Tensor[] inputs, bool is_training = false) | |||||
| { | { | ||||
| var input_list = new Tensor[] { input }; | |||||
| if (_call_context == null) | |||||
| _call_context = new ThreadLocal<CallContext>() | |||||
| { | |||||
| Value = new CallContext() | |||||
| }; | |||||
| callContext = callContext ?? new ThreadLocal<CallContext>() | |||||
| { | |||||
| Value = new CallContext() | |||||
| }; | |||||
| using var ctxManager = CallContext.enter(); | using var ctxManager = CallContext.enter(); | ||||
| string name_scope = ""; | |||||
| if (tf.context.executing_eagerly()) | |||||
| string nameScope = ""; | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| name_scope = _name; | |||||
| nameScope = name; | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| throw new NotImplementedException(""); | throw new NotImplementedException(""); | ||||
| } | } | ||||
| tf_with(ops.name_scope(name_scope), scope => | |||||
| tf_with(ops.name_scope(nameScope), scope => | |||||
| { | { | ||||
| if (!built) | if (!built) | ||||
| _maybe_build(input); | |||||
| MaybeBuild(inputs); | |||||
| call(input, is_training: is_training); | |||||
| call(inputs, is_training: is_training); | |||||
| }); | }); | ||||
| throw new NotImplementedException(""); | throw new NotImplementedException(""); | ||||
| } | } | ||||
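On the caller side this change means inputs are always passed as an array, even for single-input layers. A minimal sketch of the new signature (the `Dense` arguments and tensor values are illustrative, and `Apply` still ends in `NotImplementedException` in this patch, so this shows intent rather than a working pipeline):

```cs
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;
using static Tensorflow.Binding;

var layer = new Dense(new DenseArgs { Units = 4 });
var x = tf.constant(new float[,] { { 1f, 2f, 3f } });   // a 1 x 3 input batch
var y = layer.Apply(new[] { x }, is_training: false);   // inputs are always a Tensor[]
```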
| [Obsolete("User Apply()")] | |||||
| public Tensor[] __call__(Tensor[] inputs, | public Tensor[] __call__(Tensor[] inputs, | ||||
| Tensor training = null, | Tensor training = null, | ||||
| Tensor state = null, | Tensor state = null, | ||||
| @@ -173,14 +173,14 @@ namespace Tensorflow.Keras.Engine | |||||
| { | { | ||||
| // Symbolic execution on symbolic tensors. We will attempt to build | // Symbolic execution on symbolic tensors. We will attempt to build | ||||
| // the corresponding TF subgraph inside `backend.get_graph()` | // the corresponding TF subgraph inside `backend.get_graph()` | ||||
| var graph = backend.get_graph().as_default(); | |||||
| var graph = tf.keras.backend.get_graph().as_default(); | |||||
| tf_with(ops.name_scope(_name_scope()), delegate | tf_with(ops.name_scope(_name_scope()), delegate | ||||
| { | { | ||||
| // Build layer if applicable (if the `build` method has been | // Build layer if applicable (if the `build` method has been | ||||
| // overridden). | // overridden). | ||||
| _maybe_build(inputs[0]); | |||||
| MaybeBuild(inputs); | |||||
| outputs = call(inputs[0], | |||||
| outputs = call(inputs, | |||||
| // training: training, | // training: training, | ||||
| state: state); | state: state); | ||||
| @@ -217,25 +217,25 @@ namespace Tensorflow.Keras.Engine | |||||
| return null; | return null; | ||||
| } | } | ||||
| protected virtual Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected virtual Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| throw new NotImplementedException(""); | throw new NotImplementedException(""); | ||||
| } | } | ||||
| protected virtual string _name_scope() | protected virtual string _name_scope() | ||||
| { | { | ||||
| return name; | |||||
| return Name; | |||||
| } | } | ||||
| protected void _maybe_build(Tensor input) | |||||
| protected void MaybeBuild(Tensor[] inputs) | |||||
| { | { | ||||
| // Check input assumptions set before layer building, e.g. input rank. | // Check input assumptions set before layer building, e.g. input rank. | ||||
| if (built) | if (built) | ||||
| return; | return; | ||||
| if (_dtype == TF_DataType.DtInvalid) | |||||
| _dtype = input.dtype; | |||||
| if (DType == TF_DataType.DtInvalid) | |||||
| args.DType = inputs[0].dtype; | |||||
| var input_shapes = input.TensorShape; | |||||
| var input_shapes = inputs[0].TensorShape; | |||||
| build(input_shapes); | build(input_shapes); | ||||
| built = true; | built = true; | ||||
| } | } | ||||
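For layer authors, the contract after this change is: override `build(TensorShape)` to create weights and `call(Tensor[] inputs, ...)` for the forward pass; `MaybeBuild` fills in the dtype and input shape from `inputs[0]` on first use. A minimal sketch of a custom layer under that contract — the `AddBias` layer itself is hypothetical, but every member it touches (`add_weight`, `DType`, `built`, `tf.nn.bias_add`) appears elsewhere in this diff:

```cs
using System.Linq;
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;
using static Tensorflow.Binding;

// Hypothetical layer that only adds a learned bias, mirroring the Dense patterns below.
public class AddBias : Layer
{
    IVariableV1 bias;

    public AddBias(LayerArgs args) : base(args) { }

    protected override void build(TensorShape input_shape)
    {
        // Same convention as Dense.build: one weight per feature of the last axis.
        var last_dim = input_shape.dims.Last();
        bias = add_weight("bias",
            shape: new TensorShape(last_dim),
            initializer: tf.zeros_initializer,
            dtype: DType,
            trainable: true);
        built = true;
    }

    protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
    {
        // MaybeBuild has already run build() with inputs[0].TensorShape at this point.
        var outputs = tf.nn.bias_add(inputs[0], bias);
        return new[] { outputs, outputs };
    }
}
```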
| @@ -246,7 +246,7 @@ namespace Tensorflow.Keras.Engine | |||||
| } | } | ||||
| protected virtual IVariableV1 add_weight(string name, | protected virtual IVariableV1 add_weight(string name, | ||||
| int[] shape, | |||||
| TensorShape shape, | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | TF_DataType dtype = TF_DataType.DtInvalid, | ||||
| IInitializer initializer = null, | IInitializer initializer = null, | ||||
| bool? trainable = null, | bool? trainable = null, | ||||
| @@ -267,10 +267,10 @@ namespace Tensorflow.Keras.Engine | |||||
| else if (dtype.is_integer()) | else if (dtype.is_integer()) | ||||
| initializer = tf.zeros_initializer; | initializer = tf.zeros_initializer; | ||||
| else | else | ||||
| throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.name}"); | |||||
| throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.Name}"); | |||||
| } | } | ||||
| var variable = _add_variable_with_custom_getter(new VariableArgs | |||||
| var args = new VariableArgs | |||||
| { | { | ||||
| Name = name, | Name = name, | ||||
| Shape = shape, | Shape = shape, | ||||
| @@ -279,13 +279,14 @@ namespace Tensorflow.Keras.Engine | |||||
| Overwrite = true, | Overwrite = true, | ||||
| Initializer = initializer, | Initializer = initializer, | ||||
| Trainable = trainable.Value | Trainable = trainable.Value | ||||
| }); | |||||
| }; | |||||
| var variable = _add_variable_with_custom_getter(args); | |||||
| //backend.track_variable(variable); | //backend.track_variable(variable); | ||||
| if (trainable == true) | if (trainable == true) | ||||
| _trainable_weights.Add(variable); | |||||
| trainableWeights.Add(variable); | |||||
| else | else | ||||
| _non_trainable_weights.Add(variable); | |||||
| nonTrainableWeights.Add(variable); | |||||
| return variable; | return variable; | ||||
| } | } | ||||
| @@ -293,17 +294,16 @@ namespace Tensorflow.Keras.Engine | |||||
| protected virtual void add_update(Tensor[] updates, bool inputs = false) | protected virtual void add_update(Tensor[] updates, bool inputs = false) | ||||
| { | { | ||||
| var updates_op = updates.Select(x => x.op).ToArray(); | var updates_op = updates.Select(x => x.op).ToArray(); | ||||
| _updates.AddRange(updates_op); | |||||
| this.updates.AddRange(updates_op); | |||||
| } | } | ||||
| // Determine layer name (non-unique). | // Determine layer name (non-unique). | ||||
| protected virtual void _init_set_name(string name, bool zero_based = true) | protected virtual void _init_set_name(string name, bool zero_based = true) | ||||
| { | { | ||||
| var base_name = name; | var base_name = name; | ||||
| _name = name; | |||||
| this.name = name; | |||||
| if (name == null) | if (name == null) | ||||
| (_name, base_name) = _make_unique_name(); | |||||
| _base_name = base_name; | |||||
| (this.name, baseName) = _make_unique_name(); | |||||
| } | } | ||||
| protected virtual (string, string) _make_unique_name() | protected virtual (string, string) _make_unique_name() | ||||
| @@ -0,0 +1,61 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Collections.Generic; | |||||
| using System.Linq; | |||||
| using Tensorflow.Keras.ArgsDefinition; | |||||
| using Tensorflow.Keras.Layers; | |||||
| namespace Tensorflow.Keras.Engine | |||||
| { | |||||
| /// <summary> | |||||
| /// A `Node` describes the connectivity between two layers. | |||||
| /// | |||||
| /// Each time a layer is connected to some new input, | |||||
| /// a node is added to `layer.InboundNodes`. | |||||
| /// Each time the output of a layer is used by another layer, | |||||
| /// a node is added to `layer.OutboundNodes`. | |||||
| /// </summary> | |||||
| public class Node | |||||
| { | |||||
| NodeArgs args; | |||||
| public int[] node_indices; | |||||
| public int[] tensor_indices; | |||||
| public Tensor[] input_tensors; | |||||
| public Tensor[] Outputs => args.Outputs; | |||||
| public TensorShape[] input_shapes; | |||||
| public TensorShape[] output_shapes; | |||||
| List<Layer> kerasInputs; | |||||
| public Node(InputLayer layer, NodeArgs args) | |||||
| { | |||||
| this.args = args; | |||||
| kerasInputs = new List<Layer>(); | |||||
| // Wire up Node to Layers. | |||||
| layer.InboundNodes.Add(this); | |||||
| foreach (var input in kerasInputs) | |||||
| { | |||||
| if (input != null) | |||||
| input.OutboundNodes.Add(this); | |||||
| } | |||||
| // Set metadata on outputs. | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -16,6 +16,7 @@ | |||||
| using Tensorflow.Keras.ArgsDefinition; | using Tensorflow.Keras.ArgsDefinition; | ||||
| using Tensorflow.Keras.Layers; | using Tensorflow.Keras.Layers; | ||||
| using static Tensorflow.Binding; | |||||
| namespace Tensorflow.Keras.Engine | namespace Tensorflow.Keras.Engine | ||||
| { | { | ||||
| @@ -28,11 +29,21 @@ namespace Tensorflow.Keras.Engine | |||||
| Tensor[] outputs; | Tensor[] outputs; | ||||
| #pragma warning restore CS0169 // The field 'Sequential.outputs' is never used | #pragma warning restore CS0169 // The field 'Sequential.outputs' is never used | ||||
| public Sequential(string name = null) | |||||
| bool computeOutputAndMaskJointly; | |||||
| bool autoTrackSubLayers; | |||||
| TensorShape inferredInputShape; | |||||
| bool hasExplicitInputShape; | |||||
| TF_DataType inputDType; | |||||
| Layer[] layers; | |||||
| public Sequential(Layer[] layers = null, string name = null) | |||||
| : base(new ModelArgs { Name = name}) | : base(new ModelArgs { Name = name}) | ||||
| { | { | ||||
| supports_masking = true; | |||||
| // _compute_output_and_mask_jointly = true; | |||||
| this.layers = layers ?? new Layer[0]; | |||||
| SupportsMasking = true; | |||||
| computeOutputAndMaskJointly = true; | |||||
| autoTrackSubLayers = false; | |||||
| hasExplicitInputShape = false; | |||||
| } | } | ||||
| public void __enter__() | public void __enter__() | ||||
| @@ -48,27 +59,26 @@ namespace Tensorflow.Keras.Engine | |||||
| { | { | ||||
| built = false; | built = false; | ||||
| var set_inputs = false; | var set_inputs = false; | ||||
| //if(_layers.Count == 0) | |||||
| if(layers.Length == 0) | |||||
| { | { | ||||
| if(layer is InputLayer) | if(layer is InputLayer) | ||||
| { | { | ||||
| set_inputs = true; | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var (batch_shape, dtype) = (layer._batch_input_shape, layer._dtype); | |||||
| if (batch_shape != null) | |||||
| if (layer.BatchInputShape != null) | |||||
| { | { | ||||
| // Instantiate an input layer. | // Instantiate an input layer. | ||||
| var x = keras.layers.Input( | |||||
| batch_shape: batch_shape, | |||||
| dtype: dtype, | |||||
| name: layer.name + "_input"); | |||||
| var x = tf.keras.Input( | |||||
| batch_shape: layer.BatchInputShape, | |||||
| dtype: layer.DType, | |||||
| name: layer.Name + "_input"); | |||||
| // This will build the current layer | // This will build the current layer | ||||
| // and create the node connecting the current layer | // and create the node connecting the current layer | ||||
| // to the input layer we just created. | // to the input layer we just created. | ||||
| layer.__call__(x); | |||||
| layer.Apply(x); | |||||
| set_inputs = true; | set_inputs = true; | ||||
| } | } | ||||
| } | } | ||||
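With the new constructor, a `Sequential` can be seeded with an initial `Layer[]` or left empty and grown through the layer-adding method patched above; in the add path, a first layer carrying a `BatchInputShape` (derived from `InputShape`) gets its `InputLayer` created automatically via `tf.keras.Input`. A hedged usage sketch of the array constructor, with illustrative layer sizes:

```cs
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Layers;

// Seed the model through the new Layer[] constructor.
var model = new Sequential(new Layer[]
{
    new Dense(new DenseArgs { Units = 32, InputShape = new TensorShape(784) }),
    new Dense(new DenseArgs { Units = 10 })
});
```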
| @@ -1,11 +1,12 @@ | |||||
| using System; | using System; | ||||
| using System.Data; | using System.Data; | ||||
| using System.Linq; | |||||
| using Tensorflow.Keras; | using Tensorflow.Keras; | ||||
| using Tensorflow.Keras.ArgsDefinition; | using Tensorflow.Keras.ArgsDefinition; | ||||
| using Tensorflow.Keras.Datasets; | using Tensorflow.Keras.Datasets; | ||||
| using Tensorflow.Keras.Engine; | using Tensorflow.Keras.Engine; | ||||
| using Tensorflow.Keras.Layers; | using Tensorflow.Keras.Layers; | ||||
| using Tensorflow.Operations.Activation; | |||||
| using static Tensorflow.Binding; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -14,15 +15,56 @@ namespace Tensorflow | |||||
| public KerasDataset datasets { get; } = new KerasDataset(); | public KerasDataset datasets { get; } = new KerasDataset(); | ||||
| public Initializers initializers { get; } = new Initializers(); | public Initializers initializers { get; } = new Initializers(); | ||||
| public Layers layers { get; } = new Layers(); | public Layers layers { get; } = new Layers(); | ||||
| public Activations activations { get; } = new Activations(); | |||||
| public BackendImpl backend { get; } = new BackendImpl(); | |||||
| public Sequential Sequential() | |||||
| => new Sequential(); | |||||
| public Tensor[] Input(int[] batch_shape = null, | |||||
| int batch_size = -1, | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||||
| string name = null, | |||||
| bool sparse = false, | |||||
| bool ragged = false, | |||||
| Tensor tensor = null) | |||||
| { | |||||
| var args = new InputLayerArgs | |||||
| { | |||||
| Name = name, | |||||
| BatchInputShape = batch_shape, | |||||
| BatchSize = batch_size, | |||||
| DType = dtype, | |||||
| Sparse = sparse, | |||||
| Ragged = ragged, | |||||
| InputTensor = tensor | |||||
| }; | |||||
| var layer = new InputLayer(args); | |||||
| return layer.InboundNodes[0].Outputs; | |||||
| } | |||||
| public static Embedding Embedding(int input_dim, | |||||
| int output_dim, | |||||
| IInitializer embeddings_initializer = null, | |||||
| bool mask_zero = false) | |||||
| => new Embedding(input_dim, | |||||
| output_dim, | |||||
| embeddings_initializer, | |||||
| mask_zero); | |||||
| public class Layers | public class Layers | ||||
| { | { | ||||
| public ILayer Dense(int units, | |||||
| IActivation activation = null) | |||||
| public Layer Dense(int units, | |||||
| Activation activation = null, | |||||
| TensorShape input_shape = null) | |||||
| => new Dense(new DenseArgs | => new Dense(new DenseArgs | ||||
| { | { | ||||
| Units = units, | Units = units, | ||||
| Activation = activation | |||||
| Activation = activation ?? tf.keras.activations.Linear, | |||||
| InputShape = input_shape | |||||
| }); | }); | ||||
| } | } | ||||
| } | } | ||||
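Putting the new `KerasApi` members together: `Input` returns the output tensors of the freshly wired input node, and `Dense` now defaults to the linear activation. A hedged sketch (shapes and names are illustrative; chaining the dense layer onto the inputs would go through `Apply`, which is still incomplete at this point of the refactor):

```cs
using Tensorflow;
using static Tensorflow.Binding;

// Symbolic inputs: an open batch dimension and 784 features per sample.
var inputs = tf.keras.Input(batch_shape: new[] { -1, 784 }, name: "img");

// Ten output units with the default (linear) activation.
var dense = tf.keras.layers.Dense(10);

// Assumed next step once Apply lands: var logits = dense.Apply(inputs);
```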
| @@ -80,7 +80,7 @@ namespace Tensorflow.Keras.Layers | |||||
| this.moving_variance_initializer = moving_variance_initializer; | this.moving_variance_initializer = moving_variance_initializer; | ||||
| this.renorm = renorm; | this.renorm = renorm; | ||||
| this.fused = true; | this.fused = true; | ||||
| this.supports_masking = true; | |||||
| this.SupportsMasking = true; | |||||
| this._bessels_correction_test_only = true; | this._bessels_correction_test_only = true; | ||||
| } | } | ||||
| @@ -95,7 +95,7 @@ namespace Tensorflow.Keras.Layers | |||||
| if (Enumerable.SequenceEqual(axis, new int[] { 3 })) | if (Enumerable.SequenceEqual(axis, new int[] { 3 })) | ||||
| _data_format = "NHWC"; | _data_format = "NHWC"; | ||||
| var param_dtype = _dtype == TF_DataType.DtInvalid ? TF_DataType.TF_FLOAT : _dtype; | |||||
| var param_dtype = DType == TF_DataType.DtInvalid ? TF_DataType.TF_FLOAT : DType; | |||||
| var param_shape = new int[] { input_shape.dims[axis[0]] }; | var param_shape = new int[] { input_shape.dims[axis[0]] }; | ||||
| if (scale) | if (scale) | ||||
| @@ -143,14 +143,14 @@ namespace Tensorflow.Keras.Layers | |||||
| built = true; | built = true; | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| Tensor outputs = null; | Tensor outputs = null; | ||||
| if (fused) | if (fused) | ||||
| { | { | ||||
| Tensor training = tf.convert_to_tensor(is_training); | Tensor training = tf.convert_to_tensor(is_training); | ||||
| outputs = _fused_batch_norm(inputs, training: training); | |||||
| outputs = _fused_batch_norm(inputs[0], training: training); | |||||
| return new[] { outputs, outputs }; | return new[] { outputs, outputs }; | ||||
| } | } | ||||
| @@ -65,7 +65,7 @@ namespace Tensorflow.Keras.Layers | |||||
| this.use_bias = use_bias; | this.use_bias = use_bias; | ||||
| this.kernel_initializer = kernel_initializer; | this.kernel_initializer = kernel_initializer; | ||||
| this.bias_initializer = bias_initializer; | this.bias_initializer = bias_initializer; | ||||
| input_spec = new InputSpec(ndim: rank + 2); | |||||
| inputSpec = new InputSpec(ndim: rank + 2); | |||||
| } | } | ||||
| protected override void build(TensorShape input_shape) | protected override void build(TensorShape input_shape) | ||||
| @@ -79,17 +79,17 @@ namespace Tensorflow.Keras.Layers | |||||
| shape: kernel_shape, | shape: kernel_shape, | ||||
| initializer: kernel_initializer, | initializer: kernel_initializer, | ||||
| trainable: true, | trainable: true, | ||||
| dtype: _dtype); | |||||
| dtype: DType); | |||||
| if (use_bias) | if (use_bias) | ||||
| bias = (RefVariable)add_weight(name: "bias", | bias = (RefVariable)add_weight(name: "bias", | ||||
| shape: new int[] { filters }, | shape: new int[] { filters }, | ||||
| initializer: bias_initializer, | initializer: bias_initializer, | ||||
| trainable: true, | trainable: true, | ||||
| dtype: _dtype); | |||||
| dtype: DType); | |||||
| var axes = new Dictionary<int, int>(); | var axes = new Dictionary<int, int>(); | ||||
| axes.Add(-1, input_dim); | axes.Add(-1, input_dim); | ||||
| input_spec = new InputSpec(ndim: rank + 2, axes: axes); | |||||
| inputSpec = new InputSpec(ndim: rank + 2, axes: axes); | |||||
| string op_padding; | string op_padding; | ||||
| if (padding == "causal") | if (padding == "causal") | ||||
| @@ -108,9 +108,9 @@ namespace Tensorflow.Keras.Layers | |||||
| built = true; | built = true; | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool training = false, Tensor state = null) | |||||
| { | { | ||||
| var outputs = _convolution_op.__call__(inputs, kernel); | |||||
| var outputs = _convolution_op.__call__(inputs[0], kernel); | |||||
| if (use_bias) | if (use_bias) | ||||
| { | { | ||||
| if (data_format == "channels_first") | if (data_format == "channels_first") | ||||
| @@ -29,19 +29,16 @@ namespace Tensorflow.Keras.Layers | |||||
| /// </summary> | /// </summary> | ||||
| public class Dense : Layer | public class Dense : Layer | ||||
| { | { | ||||
| protected int units; | |||||
| protected IActivation activation; | |||||
| protected bool use_bias; | |||||
| protected IInitializer kernel_initializer; | |||||
| protected IInitializer bias_initializer; | |||||
| DenseArgs args; | |||||
| protected IVariableV1 kernel; | protected IVariableV1 kernel; | ||||
| protected IVariableV1 bias; | protected IVariableV1 bias; | ||||
| public Dense(DenseArgs args) : | public Dense(DenseArgs args) : | ||||
| base(args) | base(args) | ||||
| { | { | ||||
| this.supports_masking = true; | |||||
| this.input_spec = new InputSpec(min_ndim: 2); | |||||
| this.args = args; | |||||
| this.SupportsMasking = true; | |||||
| this.inputSpec = new InputSpec(min_ndim: 2); | |||||
| } | } | ||||
| protected override void build(TensorShape input_shape) | protected override void build(TensorShape input_shape) | ||||
| @@ -49,41 +46,41 @@ namespace Tensorflow.Keras.Layers | |||||
| var last_dim = input_shape.dims.Last(); | var last_dim = input_shape.dims.Last(); | ||||
| var axes = new Dictionary<int, int>(); | var axes = new Dictionary<int, int>(); | ||||
| axes[-1] = last_dim; | axes[-1] = last_dim; | ||||
| input_spec = new InputSpec(min_ndim: 2, axes: axes); | |||||
| inputSpec = new InputSpec(min_ndim: 2, axes: axes); | |||||
| kernel = add_weight( | kernel = add_weight( | ||||
| "kernel", | "kernel", | ||||
| shape: new int[] { last_dim, units }, | |||||
| initializer: kernel_initializer, | |||||
| dtype: _dtype, | |||||
| shape: new TensorShape(last_dim, args.Units), | |||||
| initializer: args.KernelInitializer, | |||||
| dtype: DType, | |||||
| trainable: true); | trainable: true); | ||||
| if (use_bias) | |||||
| if (args.UseBias) | |||||
| bias = add_weight( | bias = add_weight( | ||||
| "bias", | "bias", | ||||
| shape: new int[] { units }, | |||||
| initializer: bias_initializer, | |||||
| dtype: _dtype, | |||||
| shape: new TensorShape(args.Units), | |||||
| initializer: args.BiasInitializer, | |||||
| dtype: DType, | |||||
| trainable: true); | trainable: true); | ||||
| built = true; | built = true; | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool training = false, Tensor state = null) | |||||
| { | { | ||||
| Tensor outputs = null; | Tensor outputs = null; | ||||
| var rank = inputs.rank; | |||||
| var rank = inputs[0].rank; | |||||
| if(rank > 2) | if(rank > 2) | ||||
| { | { | ||||
| throw new NotImplementedException("call rank > 2"); | throw new NotImplementedException("call rank > 2"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| outputs = gen_math_ops.mat_mul(inputs, kernel.Handle); | |||||
| outputs = gen_math_ops.mat_mul(inputs[0], kernel.Handle); | |||||
| } | } | ||||
| if (use_bias) | |||||
| if (args.UseBias) | |||||
| outputs = tf.nn.bias_add(outputs, bias); | outputs = tf.nn.bias_add(outputs, bias); | ||||
| if (activation != null) | |||||
| outputs = activation.Activate(outputs); | |||||
| //if (args.Activation != null) | |||||
| //outputs = args.Activation.Activate(outputs); | |||||
| return new[] { outputs, outputs }; | return new[] { outputs, outputs }; | ||||
| } | } | ||||
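For rank-2 inputs the forward pass is just one `mat_mul` plus an optional `bias_add`; the activation hook is parked in comments until `DenseArgs.Activation` is fully wired in. A numeric sketch of what `build` + `call` amount to for `Units = 2` on three input features (the `tf.constant` literals are illustrative stand-ins for the real kernel variable):

```cs
using Tensorflow;
using static Tensorflow.Binding;

// Stand-ins for what build() would create: a (3, 2) kernel for Units = 2.
var x = tf.constant(new float[,] { { 1f, 2f, 3f } });                          // (1, 3) batch
var kernel = tf.constant(new float[,] { { 1f, 0f }, { 0f, 1f }, { 1f, 1f } }); // (3, 2)

// The rank-2 branch of call() reduces to a single mat_mul ...
var outputs = gen_math_ops.mat_mul(x, kernel);   // (1, 2) = (1, 3) x (3, 2)
// ... followed by tf.nn.bias_add(outputs, bias) when args.UseBias is set.
```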
| @@ -45,7 +45,7 @@ namespace Tensorflow.Keras.Layers | |||||
| this.output_dim = output_dim; | this.output_dim = output_dim; | ||||
| this.embeddings_initializer = embeddings_initializer == null ? tf.uniform_initializer : embeddings_initializer; | this.embeddings_initializer = embeddings_initializer == null ? tf.uniform_initializer : embeddings_initializer; | ||||
| this.mask_zero = mask_zero; | this.mask_zero = mask_zero; | ||||
| supports_masking = mask_zero; | |||||
| SupportsMasking = mask_zero; | |||||
| this.input_length = input_length; | this.input_length = input_length; | ||||
| } | } | ||||
| @@ -57,13 +57,13 @@ namespace Tensorflow.Keras.Layers | |||||
| built = true; | built = true; | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| var dtype = inputs.dtype; | |||||
| var dtype = inputs[0].dtype; | |||||
| if (dtype != tf.int32 && dtype != tf.int64) | if (dtype != tf.int32 && dtype != tf.int64) | ||||
| inputs = math_ops.cast(inputs, tf.int32); | |||||
| inputs[0] = math_ops.cast(inputs[0], tf.int32); | |||||
| var @out = embedding_ops.embedding_lookup(embeddings, inputs); | |||||
| var @out = embedding_ops.embedding_lookup(embeddings, inputs[0]); | |||||
| return new[] { @out, @out }; | return new[] { @out, @out }; | ||||
| } | } | ||||
| } | } | ||||
| @@ -17,8 +17,10 @@ | |||||
| using System; | using System; | ||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using Tensorflow.Framework.Models; | |||||
| using Tensorflow.Keras.ArgsDefinition; | using Tensorflow.Keras.ArgsDefinition; | ||||
| using Tensorflow.Keras.Engine; | using Tensorflow.Keras.Engine; | ||||
| using static Tensorflow.Binding; | |||||
| namespace Tensorflow.Keras.Layers | namespace Tensorflow.Keras.Layers | ||||
| { | { | ||||
| @@ -27,82 +29,68 @@ namespace Tensorflow.Keras.Layers | |||||
| /// </summary> | /// </summary> | ||||
| public class InputLayer : Layer | public class InputLayer : Layer | ||||
| { | { | ||||
| public bool sparse; | |||||
| public int? batch_size; | |||||
| public bool is_placeholder; | |||||
| InputLayerArgs args; | |||||
| bool isPlaceholder; | |||||
| TensorSpec typeSpec; | |||||
| public InputLayer(int[] input_shape = null, | |||||
| int[] batch_input_shape = null, | |||||
| int? batch_size = null, | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||||
| string name = null, | |||||
| bool sparse = false, | |||||
| Tensor input_tensor = null) : | |||||
| base(new LayerArgs | |||||
| { | |||||
| DType = dtype, Name = name | |||||
| }) | |||||
| public InputLayer(InputLayerArgs args) : | |||||
| base(args) | |||||
| { | { | ||||
| this.args = args; | |||||
| built = true; | built = true; | ||||
| this.sparse = sparse; | |||||
| this.batch_size = batch_size; | |||||
| this.supports_masking = true; | |||||
| this.SupportsMasking = true; | |||||
| if(batch_input_shape != null) | |||||
| if(BatchInputShape != null) | |||||
| { | { | ||||
| batch_size = batch_input_shape[0]; | |||||
| input_shape = batch_input_shape.Skip(1).ToArray(); | |||||
| args.BatchSize = BatchInputShape.dims[0]; | |||||
| args.InputShape = BatchInputShape.dims[1..]; | |||||
| } | } | ||||
| // moved to base class | // moved to base class | ||||
| if (string.IsNullOrEmpty(name)) | |||||
| if (string.IsNullOrEmpty(Name)) | |||||
| { | { | ||||
| var prefix = "input"; | var prefix = "input"; | ||||
| name = prefix + '_' + backend.get_uid(prefix); | |||||
| args.Name = prefix + '_' + tf.keras.backend.get_uid(prefix); | |||||
| } | } | ||||
| if (input_tensor == null) | |||||
| if (args.InputTensor == null) | |||||
| { | { | ||||
| if(input_shape != null) | |||||
| if(args.InputShape != null) | |||||
| { | { | ||||
| var dims = new List<int> { batch_size.HasValue ? batch_size.Value : -1 }; | |||||
| dims.AddRange(input_shape); | |||||
| batch_input_shape = dims.ToArray(); | |||||
| args.BatchInputShape = new int[] { args.BatchSize } | |||||
| .Concat(args.InputShape.dims) | |||||
| .ToArray(); | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| batch_input_shape = null; | |||||
| args.BatchInputShape = null; | |||||
| } | } | ||||
| var graph = backend.get_graph().as_default(); | |||||
| // In graph mode, create a graph placeholder to call the layer on. | // In graph mode, create a graph placeholder to call the layer on. | ||||
| if (sparse) | |||||
| { | |||||
| throw new NotImplementedException("InputLayer sparse is true"); | |||||
| } | |||||
| else | |||||
| { | |||||
| input_tensor = backend.placeholder( | |||||
| shape: batch_input_shape, | |||||
| dtype: dtype, | |||||
| name: name); | |||||
| } | |||||
| tf.Context.graph_mode(); | |||||
| args.InputTensor = tf.keras.backend.placeholder( | |||||
| shape: BatchInputShape, | |||||
| dtype: DType, | |||||
| name: Name, | |||||
| sparse: args.Sparse); | |||||
| tf.Context.eager_mode(); | |||||
| is_placeholder = true; | |||||
| _batch_input_shape = batch_input_shape; | |||||
| isPlaceholder = true; | |||||
| } | } | ||||
| // Create an input node to add to self.outbound_node | // Create an input node to add to self.outbound_node | ||||
| // and set output_tensors' _keras_history. | // and set output_tensors' _keras_history. | ||||
| // input_tensor._keras_history = base_layer.KerasHistory(self, 0, 0) | // input_tensor._keras_history = base_layer.KerasHistory(self, 0, 0) | ||||
| // input_tensor._keras_mask = None | // input_tensor._keras_mask = None | ||||
| new Node(this, | |||||
| inbound_layers: new Layer[0], | |||||
| node_indices: new int[0], | |||||
| tensor_indices: new int[0], | |||||
| input_tensors: new Tensor[] { input_tensor }, | |||||
| output_tensors: new Tensor[] { input_tensor }); | |||||
| new Node(this, new NodeArgs | |||||
| { | |||||
| InputTensors = new Tensor[] { args.InputTensor }, | |||||
| Outputs = new Tensor[] { args.InputTensor } | |||||
| }); | |||||
| typeSpec = new TensorSpec(args.InputTensor.TensorShape, | |||||
| dtype: args.InputTensor.dtype, | |||||
| name: Name); | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
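Besides the placeholder path shown above, the constructor also accepts an already-materialized tensor through `InputLayerArgs.InputTensor`, in which case no placeholder is created and the tensor is simply wrapped in a `Node`. A hedged sketch of that branch (tensor values and the layer name are illustrative):

```cs
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;
using static Tensorflow.Binding;

// Adopting an existing tensor: the placeholder branch is skipped entirely.
var t = tf.constant(new float[,] { { 0f, 1f }, { 2f, 3f } });
var input = new InputLayer(new InputLayerArgs
{
    Name = "adopted_input",
    InputTensor = t,
    DType = t.dtype
});

// The Node registered at the end of the constructor hands the tensor back,
// which is exactly what tf.keras.Input returns to its caller.
var outputs = input.InboundNodes[0].Outputs;
```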
| @@ -1,85 +0,0 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System.Linq; | |||||
| using Tensorflow.Keras.Engine; | |||||
| namespace Tensorflow.Keras.Layers | |||||
| { | |||||
| /// <summary> | |||||
| /// A `Node` describes the connectivity between two layers. | |||||
| /// </summary> | |||||
| public class Node | |||||
| { | |||||
| public InputLayer outbound_layer; | |||||
| public Layer[] inbound_layers; | |||||
| public int[] node_indices; | |||||
| public int[] tensor_indices; | |||||
| public Tensor[] input_tensors; | |||||
| public Tensor[] output_tensors; | |||||
| public int[][] input_shapes; | |||||
| public int[][] output_shapes; | |||||
| /// <summary> | |||||
| /// | |||||
| /// </summary> | |||||
| /// <param name="outbound_layer"> | |||||
| /// the layer that takes | |||||
| /// `input_tensors` and turns them into `output_tensors` | |||||
| /// (the node gets created when the `call` | |||||
| /// method of the layer was called). | |||||
| /// </param> | |||||
| /// <param name="inbound_layers"> | |||||
| /// a list of layers, the same length as `input_tensors`, | |||||
| /// the layers from where `input_tensors` originate. | |||||
| /// </param> | |||||
| /// <param name="node_indices"> | |||||
| /// a list of integers, the same length as `inbound_layers`. | |||||
| /// `node_indices[i]` is the origin node of `input_tensors[i]` | |||||
| /// (necessary since each inbound layer might have several nodes, | |||||
| /// e.g. if the layer is being shared with a different data stream). | |||||
| /// </param> | |||||
| /// <param name="tensor_indices"></param> | |||||
| /// <param name="input_tensors">list of input tensors.</param> | |||||
| /// <param name="output_tensors">list of output tensors.</param> | |||||
| public Node(InputLayer outbound_layer, | |||||
| Layer[] inbound_layers, | |||||
| int[] node_indices, | |||||
| int[] tensor_indices, | |||||
| Tensor[] input_tensors, | |||||
| Tensor[] output_tensors) | |||||
| { | |||||
| this.outbound_layer = outbound_layer; | |||||
| this.inbound_layers = inbound_layers; | |||||
| this.node_indices = node_indices; | |||||
| this.tensor_indices = tensor_indices; | |||||
| this.input_tensors = input_tensors; | |||||
| this.output_tensors = output_tensors; | |||||
| input_shapes = input_tensors.Select(x => x._shape_tuple()).ToArray(); | |||||
| output_shapes = output_tensors.Select(x => x._shape_tuple()).ToArray(); | |||||
| // Add nodes to all layers involved. | |||||
| foreach (var layer in inbound_layers) | |||||
| { | |||||
| if (layer != null) | |||||
| layer.outbound_nodes.Add(this); | |||||
| } | |||||
| outbound_layer.inbound_nodes.Add(this); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -45,7 +45,7 @@ namespace Tensorflow.Keras.Layers | |||||
| this.input_spec = new InputSpec(ndim: 4); | this.input_spec = new InputSpec(ndim: 4); | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| int[] pool_shape; | int[] pool_shape; | ||||
| if (data_format == "channels_last") | if (data_format == "channels_last") | ||||
| @@ -60,7 +60,7 @@ namespace Tensorflow.Keras.Layers | |||||
| } | } | ||||
| var outputs = pool_function.Apply( | var outputs = pool_function.Apply( | ||||
| inputs, | |||||
| inputs[0], | |||||
| ksize: pool_shape, | ksize: pool_shape, | ||||
| strides: strides, | strides: strides, | ||||
| padding: padding.ToUpper(), | padding: padding.ToUpper(), | ||||
| @@ -40,7 +40,7 @@ namespace Tensorflow.Keras.Utils | |||||
| var variable_dtype = args.DType.as_base_dtype(); | var variable_dtype = args.DType.as_base_dtype(); | ||||
| var v = tf.Variable(init_val, | var v = tf.Variable(init_val, | ||||
| dtype: args.DType, | |||||
| dtype: variable_dtype, | |||||
| shape: args.Shape, | shape: args.Shape, | ||||
| name: args.Name, | name: args.Name, | ||||
| trainable: args.Trainable, | trainable: args.Trainable, | ||||
| @@ -94,14 +94,14 @@ namespace Tensorflow.Keras.Utils | |||||
| { | { | ||||
| var graph = ops.get_default_graph(); | var graph = ops.get_default_graph(); | ||||
| Dictionary<(string, string), int> name_uid_map = null; | Dictionary<(string, string), int> name_uid_map = null; | ||||
| if (backend.PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | |||||
| if (tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph)) | |||||
| { | { | ||||
| name_uid_map = backend.PER_GRAPH_LAYER_NAME_UIDS[graph]; | |||||
| name_uid_map = tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS[graph]; | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| name_uid_map = new Dictionary<(string, string), int>(); | name_uid_map = new Dictionary<(string, string), int>(); | ||||
| backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map; | |||||
| tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map; | |||||
| } | } | ||||
| return name_uid_map; | return name_uid_map; | ||||
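This per-graph map is what backs `tf.keras.backend.get_uid`, which in turn produces the `prefix_N` default names seen in `InputLayer` above. A small sketch of the observable behaviour; the counter values assume a fresh graph, and the exact return type is taken on faith here:

```cs
using static Tensorflow.Binding;

// Each (graph, prefix) pair gets its own counter, so names stay unique per graph.
var n1 = tf.keras.backend.get_uid("dense");   // 1 on a fresh graph (assumed)
var n2 = tf.keras.backend.get_uid("dense");   // 2
var layerName = "dense_" + n2;                // the same "prefix_N" scheme InputLayer uses
```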
| @@ -49,8 +49,8 @@ namespace Tensorflow.Layers | |||||
| this._reuse = _reuse; | this._reuse = _reuse; | ||||
| // Avoid an incorrect lint error | // Avoid an incorrect lint error | ||||
| _trainable_weights = new List<IVariableV1>(); | |||||
| _non_trainable_weights = new List<IVariableV1>(); | |||||
| trainableWeights = new List<IVariableV1>(); | |||||
| nonTrainableWeights = new List<IVariableV1>(); | |||||
| this.built = false; | this.built = false; | ||||
| _keras_style = false; | _keras_style = false; | ||||
| } | } | ||||
| @@ -95,7 +95,7 @@ namespace Tensorflow.Layers | |||||
| // Update global default collections. | // Update global default collections. | ||||
| _add_elements_to_collection(_updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS }); | |||||
| _add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS }); | |||||
| return outputs; | return outputs; | ||||
| } | } | ||||
| @@ -202,7 +202,7 @@ namespace Tensorflow.Layers | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope => | |||||
| tf_with(tf.variable_scope(scope, default_name: baseName), captured_scope => | |||||
| { | { | ||||
| // convert variable_scope to VariableScope | // convert variable_scope to VariableScope | ||||
| _scope = captured_scope; | _scope = captured_scope; | ||||
| @@ -40,7 +40,7 @@ namespace Tensorflow | |||||
| IActivation activation = null, bool? reuse = null, string name = null, | IActivation activation = null, bool? reuse = null, string name = null, | ||||
| TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: reuse, name: name, dtype: dtype) | TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: reuse, name: name, dtype: dtype) | ||||
| { | { | ||||
| input_spec = new InputSpec(ndim: 2); | |||||
| inputSpec = new InputSpec(ndim: 2); | |||||
| _num_units = num_units; | _num_units = num_units; | ||||
| _forget_bias = forget_bias; | _forget_bias = forget_bias; | ||||
| _state_is_tuple = state_is_tuple; | _state_is_tuple = state_is_tuple; | ||||
| @@ -74,7 +74,7 @@ namespace Tensorflow | |||||
| /// <param name="training"></param> | /// <param name="training"></param> | ||||
| /// <param name="state"></param> | /// <param name="state"></param> | ||||
| /// <returns></returns> | /// <returns></returns> | ||||
| protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| var one = constant_op.constant(1, dtype: dtypes.int32); | var one = constant_op.constant(1, dtype: dtypes.int32); | ||||
| // Parameters of gates are concatenated into one multiply for efficiency. | // Parameters of gates are concatenated into one multiply for efficiency. | ||||
| @@ -87,7 +87,7 @@ namespace Tensorflow | |||||
| // array_ops.split(value: state, num_or_size_splits: 2, axis: one); | // array_ops.split(value: state, num_or_size_splits: 2, axis: one); | ||||
| throw new NotImplementedException("BasicLstmCell call"); | throw new NotImplementedException("BasicLstmCell call"); | ||||
| } | } | ||||
| var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs, h }, 1), _kernel as RefVariable); | |||||
| var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs[0], h }, 1), _kernel as RefVariable); | |||||
| gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable); | gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable); | ||||
| // i = input_gate, j = new_input, f = forget_gate, o = output_gate | // i = input_gate, j = new_input, f = forget_gate, o = output_gate | ||||
| @@ -42,7 +42,7 @@ namespace Tensorflow | |||||
| dtype: dtype) | dtype: dtype) | ||||
| { | { | ||||
| // Inputs must be 2-dimensional. | // Inputs must be 2-dimensional. | ||||
| input_spec = new InputSpec(ndim: 2); | |||||
| inputSpec = new InputSpec(ndim: 2); | |||||
| _num_units = num_units; | _num_units = num_units; | ||||
| if (activation == null) | if (activation == null) | ||||
| @@ -67,10 +67,10 @@ namespace Tensorflow | |||||
| built = true; | built = true; | ||||
| } | } | ||||
| protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null) | |||||
| protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null) | |||||
| { | { | ||||
| // Most basic RNN: output = new_state = act(W * input + U * state + B). | // Most basic RNN: output = new_state = act(W * input + U * state + B). | ||||
| var concat = array_ops.concat(new[] { inputs, state }, 1); | |||||
| var concat = array_ops.concat(new[] { inputs[0], state }, 1); | |||||
| var gate_inputs = math_ops.matmul(concat, _kernel as RefVariable); | var gate_inputs = math_ops.matmul(concat, _kernel as RefVariable); | ||||
| gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable); | gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable); | ||||
| var output = _activation(gate_inputs, null); | var output = _activation(gate_inputs, null); | ||||
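The comment above compresses the whole cell: because the single `_kernel` stacks the input and state weights, concatenating `[input, state]` and doing one matmul equals `input·W + state·U`, after which the bias and activation are applied. A plain C# check of that identity with illustrative 2-unit shapes and made-up weights:

```cs
// gate_inputs = concat([x, h]) · K, with K = [[W]; [U]] stacked row-wise,
// equals x·W + h·U — the "W * input + U * state" in the comment above.
double[] x = { 1, 2 };                      // input  (1 x 2)
double[] h = { 3, 4 };                      // state  (1 x 2)
double[,] W = { { 1, 0 }, { 0, 1 } };       // input kernel  (2 x 2)
double[,] U = { { 2, 0 }, { 0, 2 } };       // state kernel  (2 x 2)

// Stack W on top of U -> the single _kernel the cell actually stores (4 x 2).
double[,] K = { { 1, 0 }, { 0, 1 }, { 2, 0 }, { 0, 2 } };
double[] xh = { 1, 2, 3, 4 };               // concat([x, h], axis 1)

for (int j = 0; j < 2; j++)
{
    double viaConcat = 0;
    for (int i = 0; i < 4; i++) viaConcat += xh[i] * K[i, j];

    double viaSplit = x[0] * W[0, j] + x[1] * W[1, j]
                    + h[0] * U[0, j] + h[1] * U[1, j];

    System.Console.WriteLine($"{viaConcat} == {viaSplit}");   // prints equal values
}
```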
| @@ -42,7 +42,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor conv2d(Conv2dParams parameters) | public static Tensor conv2d(Conv2dParams parameters) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Conv2D", name: parameters.Name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Conv2D", name: parameters.Name, args: new | |||||
| { | { | ||||
| input = parameters.Input, | input = parameters.Input, | ||||
| filter = parameters.Filter, | filter = parameters.Filter, | ||||
| @@ -64,7 +64,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor conv2d_backprop_filter(Conv2dParams parameters) | public static Tensor conv2d_backprop_filter(Conv2dParams parameters) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new | |||||
| { | { | ||||
| input = parameters.Input, | input = parameters.Input, | ||||
| filter_sizes = parameters.FilterSizes, | filter_sizes = parameters.FilterSizes, | ||||
| @@ -87,7 +87,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor conv2d_backprop_input(Conv2dParams parameters) | public static Tensor conv2d_backprop_input(Conv2dParams parameters) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new | |||||
| { | { | ||||
| input_sizes = parameters.InputSizes, | input_sizes = parameters.InputSizes, | ||||
| filter = parameters.Filter, | filter = parameters.Filter, | ||||
| @@ -111,7 +111,7 @@ namespace Tensorflow.Operations | |||||
| if (data_format == null) | if (data_format == null) | ||||
| data_format = "NHWC"; | data_format = "NHWC"; | ||||
| var _op = tf._op_def_lib._apply_op_helper("BiasAdd", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("BiasAdd", name: name, args: new | |||||
| { | { | ||||
| value, | value, | ||||
| bias, | bias, | ||||
| @@ -128,7 +128,7 @@ namespace Tensorflow.Operations | |||||
| if (data_format == null) | if (data_format == null) | ||||
| data_format = "NHWC"; | data_format = "NHWC"; | ||||
| var _op = tf._op_def_lib._apply_op_helper("BiasAddGrad", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("BiasAddGrad", name: name, args: new | |||||
| { | { | ||||
| out_backprop, | out_backprop, | ||||
| data_format | data_format | ||||
| @@ -154,7 +154,7 @@ namespace Tensorflow.Operations | |||||
| /// </remarks> | /// </remarks> | ||||
| public static Tensor elu(Tensor features, string name = "Elu") | public static Tensor elu(Tensor features, string name = "Elu") | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("Elu", name: name, args: new { features }); | |||||
| var op = tf.OpDefLib._apply_op_helper("Elu", name: name, args: new { features }); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| @@ -165,7 +165,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params) | public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new | |||||
| { | { | ||||
| y_backprop = @params.YBackprop, | y_backprop = @params.YBackprop, | ||||
| x = @params.X, | x = @params.X, | ||||
| @@ -181,7 +181,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) | public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new | |||||
| { | { | ||||
| y_backprop = @params.YBackprop, | y_backprop = @params.YBackprop, | ||||
| x = @params.X, | x = @params.X, | ||||
| @@ -206,7 +206,7 @@ namespace Tensorflow.Operations | |||||
| bool is_training = true, | bool is_training = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("FusedBatchNorm", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, args: new | |||||
| { | { | ||||
| x, | x, | ||||
| scale, | scale, | ||||
| @@ -231,7 +231,7 @@ namespace Tensorflow.Operations | |||||
| bool is_training = true, | bool is_training = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV3", name: name, args: new | |||||
| { | { | ||||
| x, | x, | ||||
| scale, | scale, | ||||
| @@ -259,7 +259,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1, | public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1, | ||||
| int alpha = 1, float beta = 0.5f, string name = null) | int alpha = 1, float beta = 0.5f, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("LRN", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("LRN", name: name, args: new | |||||
| { | { | ||||
| input, | input, | ||||
| depth_radius, | depth_radius, | ||||
| @@ -273,7 +273,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor log_softmax(Tensor logits, string name = null) | public static Tensor log_softmax(Tensor logits, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("LogSoftmax", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("LogSoftmax", name: name, args: new | |||||
| { | { | ||||
| logits | logits | ||||
| }); | }); | ||||
| @@ -291,7 +291,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns>A `Tensor` of type `bool`.</returns> | /// <returns>A `Tensor` of type `bool`.</returns> | ||||
| public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) | public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("InTopKV2", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("InTopKV2", name: name, args: new | |||||
| { | { | ||||
| predictions, | predictions, | ||||
| targets, | targets, | ||||
| @@ -303,7 +303,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) | public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("LeakyRelu", name: name, args: new | |||||
| { | { | ||||
| features, | features, | ||||
| alpha | alpha | ||||
| @@ -319,7 +319,7 @@ namespace Tensorflow.Operations | |||||
| string data_format = "NHWC", | string data_format = "NHWC", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("MaxPool", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("MaxPool", name: name, args: new | |||||
| { | { | ||||
| input, | input, | ||||
| ksize, | ksize, | ||||
| @@ -334,7 +334,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, | public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, | ||||
| string data_format= "NHWC", string name= null) | string data_format= "NHWC", string name= null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("MaxPoolGrad", name: name, args: new | |||||
| { | { | ||||
| orig_input, | orig_input, | ||||
| orig_output, | orig_output, | ||||
| @@ -350,7 +350,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null) | public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TopKV2", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TopKV2", name: name, args: new | |||||
| { | { | ||||
| input, | input, | ||||
| k, | k, | ||||
| @@ -362,9 +362,9 @@ namespace Tensorflow.Operations | |||||
| public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null) | public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ReluGrad", name, | "ReluGrad", name, | ||||
| null, | null, | ||||
| gradients, features); | gradients, features); | ||||
| @@ -372,7 +372,7 @@ namespace Tensorflow.Operations | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ReluGrad", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ReluGrad", name: name, args: new | |||||
| { | { | ||||
| gradients, | gradients, | ||||
| features | features | ||||
| @@ -383,7 +383,7 @@ namespace Tensorflow.Operations | |||||
| public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) | public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("LeakyReluGrad", name: name, args: new | |||||
| { | { | ||||
| gradients, | gradients, | ||||
| features, | features, | ||||
| @@ -395,9 +395,9 @@ namespace Tensorflow.Operations | |||||
| public static Tensor softmax(Tensor logits, string name = null) | public static Tensor softmax(Tensor logits, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Softmax", name, | "Softmax", name, | ||||
| null, | null, | ||||
| logits); | logits); | ||||
| @@ -405,7 +405,7 @@ namespace Tensorflow.Operations | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Softmax", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Softmax", name: name, args: new | |||||
| { | { | ||||
| logits | logits | ||||
| }); | }); | ||||
| @@ -422,7 +422,7 @@ namespace Tensorflow.Operations | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null) | public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new | |||||
| { | { | ||||
| features, | features, | ||||
| labels | labels | ||||
| @@ -460,7 +460,7 @@ namespace Tensorflow.Operations | |||||
| /// </remarks> | /// </remarks> | ||||
| public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") | public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels }); | |||||
| var op = tf.OpDefLib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels }); | |||||
| int _idx = 0; | int _idx = 0; | ||||
| var loss = op.outputs[_idx++]; | var loss = op.outputs[_idx++]; | ||||
| var backprop = op.outputs[_idx++]; | var backprop = op.outputs[_idx++]; | ||||
| @@ -475,9 +475,9 @@ namespace Tensorflow.Operations | |||||
| /// <returns>A `Tensor`. Has the same type as `features`.</returns> | /// <returns>A `Tensor`. Has the same type as `features`.</returns> | ||||
| public static Tensor relu(Tensor features, string name = null) | public static Tensor relu(Tensor features, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Relu", name, | "Relu", name, | ||||
| null, | null, | ||||
| features); | features); | ||||
| @@ -485,15 +485,15 @@ namespace Tensorflow.Operations | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Relu", name: name, args: new { features }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Relu", name: name, args: new { features }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor tanh(Tensor x, string name = null) | public static Tensor tanh(Tensor x, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Tanh", name, | "Tanh", name, | ||||
| null, | null, | ||||
| x); | x); | ||||
| @@ -501,7 +501,7 @@ namespace Tensorflow.Operations | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Tanh", name: name, args: new { x }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Tanh", name: name, args: new { x }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| } | } | ||||
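The pattern repeated throughout this file after the rename: check `tf.Context.executing_eagerly()`, take the `TFE_FastPathExecute` fast path when eager, otherwise fall back to `tf.OpDefLib._apply_op_helper` to record a graph op. A hedged sketch of that dispatch for a hypothetical unary op (the op name `MyUnaryOp` is illustrative and not a registered TensorFlow op; the two runner calls mirror the `Relu`/`Tanh` wrappers above):

```cs
using Tensorflow;
using static Tensorflow.Binding;

public static class my_ops
{
    public static Tensor my_unary_op(Tensor x, string name = null)
    {
        if (tf.Context.executing_eagerly())
        {
            // Eager fast path: run the kernel immediately on the current device.
            var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
                "MyUnaryOp", name,
                null,
                x);
            return results[0];
        }

        // Graph mode: record the op through the op-definition library instead.
        var _op = tf.OpDefLib._apply_op_helper("MyUnaryOp", name: name, args: new { x });
        return _op.outputs[0];
    }
}
```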
| @@ -31,8 +31,8 @@ namespace Tensorflow | |||||
| public int InputListLength(string name) | public int InputListLength(string name) | ||||
| { | { | ||||
| int num = 0; | int num = 0; | ||||
| num = c_api.TF_OperationInputListLength(_handle, name, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| num = c_api.TF_OperationInputListLength(_handle, name, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| return num; | return num; | ||||
| } | } | ||||
| public int NumInputs => c_api.TF_OperationNumInputs(_handle); | public int NumInputs => c_api.TF_OperationNumInputs(_handle); | ||||
| @@ -28,8 +28,8 @@ namespace Tensorflow | |||||
| public int OutputListLength(string name) | public int OutputListLength(string name) | ||||
| { | { | ||||
| int num = c_api.TF_OperationOutputListLength(_handle, name, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| int num = c_api.TF_OperationOutputListLength(_handle, name, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| return num; | return num; | ||||
| } | } | ||||
| @@ -237,8 +237,8 @@ namespace Tensorflow | |||||
| lock (Locks.ProcessWide) | lock (Locks.ProcessWide) | ||||
| { | { | ||||
| using var buf = new Buffer(); | using var buf = new Buffer(); | ||||
| c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| x = AttrValue.Parser.ParseFrom(buf.DangerousMemoryBlock.Stream()); | x = AttrValue.Parser.ParseFrom(buf.DangerousMemoryBlock.Stream()); | ||||
| } | } | ||||
| @@ -297,9 +297,9 @@ namespace Tensorflow | |||||
| // the updated inputs are reloaded from the c_api | // the updated inputs are reloaded from the c_api | ||||
| lock (Locks.ProcessWide) | lock (Locks.ProcessWide) | ||||
| { | { | ||||
| c_api.UpdateEdge(_graph, output, input, tf.status.Handle); | |||||
| c_api.UpdateEdge(_graph, output, input, tf.Status.Handle); | |||||
| //var updated_inputs = inputs; | //var updated_inputs = inputs; | ||||
| tf.status.Check(); | |||||
| tf.Status.Check(); | |||||
| } | } | ||||
| } | } | ||||
| @@ -18,6 +18,8 @@ using NumSharp; | |||||
| using System; | using System; | ||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using System.Reflection; | |||||
| using Tensorflow.Contexts; | |||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using Tensorflow.Framework; | using Tensorflow.Framework; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| @@ -459,7 +461,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| if (!tf.context.executing_eagerly()) | |||||
| if (!tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var input_tensor = ops.convert_to_tensor(input); | var input_tensor = ops.convert_to_tensor(input); | ||||
| var input_shape = input_tensor.TensorShape; | var input_shape = input_tensor.TensorShape; | ||||
| @@ -607,9 +609,9 @@ namespace Tensorflow | |||||
| float padding_value = 0, | float padding_value = 0, | ||||
| string align = "RIGHT_LEFT") | string align = "RIGHT_LEFT") | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "MatrixDiagV3", name, | "MatrixDiagV3", name, | ||||
| null, | null, | ||||
| diagonal, k, num_rows, num_cols, padding_value, | diagonal, k, num_rows, num_cols, padding_value, | ||||
| @@ -626,9 +628,9 @@ namespace Tensorflow | |||||
| int k = 0, | int k = 0, | ||||
| string align = "RIGHT_LEFT") | string align = "RIGHT_LEFT") | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "MatrixSetDiagV3", name, | "MatrixSetDiagV3", name, | ||||
| null, | null, | ||||
| input, diagonal, k, | input, diagonal, k, | ||||
| @@ -714,24 +716,24 @@ namespace Tensorflow | |||||
| { | { | ||||
| var size_splits = ops.convert_to_tensor(num_split); | var size_splits = ops.convert_to_tensor(num_split); | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.context); | |||||
| return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.Context); | |||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| private static Tensor[] split_eager_fallback<Ta, Tv>(Ta axis, Tv value, int num_split, string name, Context ctx = null) | private static Tensor[] split_eager_fallback<Ta, Tv>(Ta axis, Tv value, int num_split, string name, Context ctx = null) | ||||
| { | { | ||||
| var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new object[] { value }); | |||||
| var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { value }); | |||||
| var axis_tensor = ops.convert_to_tensor(axis, dtype: TF_DataType.TF_INT32); | var axis_tensor = ops.convert_to_tensor(axis, dtype: TF_DataType.TF_INT32); | ||||
| var _inputs_flat = new List<Tensor> { axis_tensor }; | var _inputs_flat = new List<Tensor> { axis_tensor }; | ||||
| _inputs_flat.AddRange(input); | _inputs_flat.AddRange(input); | ||||
| var _attrs = new object[] { "num_split", num_split, "T", _attr_T }; | var _attrs = new object[] { "num_split", num_split, "T", _attr_T }; | ||||
| return tf._execute.execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name); | |||||
| return tf.Runner.Execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name); | |||||
| } | } | ||||
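Ops without a registered fast path go through a hand-written `*_eager_fallback`, and the helpers it depends on are renamed here as well: `tf._execute.args_to_matching_eager` becomes `tf.Runner.ArgsToMatchingEager` and `tf._execute.execute` becomes `tf.Runner.Execute`. A condensed sketch of that fallback shape for a hypothetical single-input op (op and method names are illustrative):

```csharp
// Sketch of an eager fallback, following split_eager_fallback above.
private static Tensor softsign_eager_fallback(Tensor x, string name, Context ctx = null)
{
    // Resolve the T attribute and convert the inputs to eager tensors.
    var (_attr_T, inputs) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { x });
    // Attrs are passed as a flattened name/value array.
    var _attrs = new object[] { "T", _attr_T };
    // Execute(ctx, op_name, num_outputs, inputs, attrs, name) runs the op eagerly.
    return tf.Runner.Execute(ctx, "Softsign", 1, inputs, _attrs, name: name)[0];
}
```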
| public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null) | public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null) | ||||
| @@ -780,9 +782,13 @@ namespace Tensorflow | |||||
| return result; | return result; | ||||
| } | } | ||||
| public static Tensor placeholder(TF_DataType dtype) | |||||
| public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) | |||||
| { | { | ||||
| throw new NotImplementedException("array_ops.placeholder"); | |||||
| if (tf.Context.executing_eagerly()) | |||||
| throw new RuntimeError("tf.placeholder() is not compatible with eager execution."); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape }); | |||||
| return _op.output; | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
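`array_ops.placeholder` previously threw `NotImplementedException`; it now creates a real `Placeholder` node in graph mode and fails fast with a `RuntimeError` under eager execution (the duplicate wrapper in `gen_array_ops` is removed further down). A small usage sketch, assuming the public `tf.placeholder` API forwards to this method and that eager execution has been turned off so graph mode is active:

```csharp
// Sketch only: graph-mode placeholders (an eager-mode call would throw
// "tf.placeholder() is not compatible with eager execution.").
var x = tf.placeholder(tf.float32, shape: new TensorShape(-1, 784), name: "input");
var y = tf.placeholder(tf.int32, shape: new TensorShape(-1), name: "labels");
// x and y are fed later through a session's feed dictionary, as in graph-mode TensorFlow.
```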
| @@ -376,7 +376,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| if (pred.ToArray<bool>()[0]) | if (pred.ToArray<bool>()[0]) | ||||
| return true_fn() as Tensor; | return true_fn() as Tensor; | ||||
| @@ -460,7 +460,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| if (pred.ToArray<bool>()[0]) | if (pred.ToArray<bool>()[0]) | ||||
| return true_fn() as Tensor[]; | return true_fn() as Tensor[]; | ||||
| @@ -17,9 +17,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public Tensor tensor_slice_dataset(Tensor[] components, TensorShape[] output_shapes, string name = null) | public Tensor tensor_slice_dataset(Tensor[] components, TensorShape[] output_shapes, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "TensorSliceDataset", name, | "TensorSliceDataset", name, | ||||
| null, | null, | ||||
| new object[] | new object[] | ||||
| @@ -35,9 +35,9 @@ namespace Tensorflow | |||||
| public Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | public Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "RepeatDataset", name, | "RepeatDataset", name, | ||||
| null, | null, | ||||
| input_dataset, count, | input_dataset, count, | ||||
| @@ -55,9 +55,9 @@ namespace Tensorflow | |||||
| bool reshuffle_each_iteration = true, | bool reshuffle_each_iteration = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ShuffleDatasetV3", name, | "ShuffleDatasetV3", name, | ||||
| null, | null, | ||||
| input_dataset, buffer_size, | input_dataset, buffer_size, | ||||
| @@ -73,9 +73,9 @@ namespace Tensorflow | |||||
| public Tensor dummy_seed_generator(string name = null) | public Tensor dummy_seed_generator(string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "DummySeedGenerator", name, | "DummySeedGenerator", name, | ||||
| null); | null); | ||||
| return results[0]; | return results[0]; | ||||
| @@ -101,9 +101,9 @@ namespace Tensorflow | |||||
| bool parallel_copy = false, | bool parallel_copy = false, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "BatchDatasetV2", name, | "BatchDatasetV2", name, | ||||
| null, | null, | ||||
| input_dataset, buffer_size, drop_remainder, | input_dataset, buffer_size, drop_remainder, | ||||
| @@ -133,9 +133,9 @@ namespace Tensorflow | |||||
| bool legacy_autotune = true, | bool legacy_autotune = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "PrefetchDataset", name, | "PrefetchDataset", name, | ||||
| null, | null, | ||||
| input_dataset, buffer_size, | input_dataset, buffer_size, | ||||
| @@ -162,9 +162,9 @@ namespace Tensorflow | |||||
| TF_DataType[] output_types, TensorShape[] output_shapes, | TF_DataType[] output_types, TensorShape[] output_shapes, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "TakeDataset", name, | "TakeDataset", name, | ||||
| null, | null, | ||||
| input_dataset, count, | input_dataset, count, | ||||
| @@ -194,9 +194,9 @@ namespace Tensorflow | |||||
| if (optimization_configs == null) | if (optimization_configs == null) | ||||
| optimization_configs = new string[0]; | optimization_configs = new string[0]; | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "OptimizeDataset", name, | "OptimizeDataset", name, | ||||
| null, | null, | ||||
| input_dataset, optimizations, | input_dataset, optimizations, | ||||
| @@ -224,9 +224,9 @@ namespace Tensorflow | |||||
| AutotuneAlgorithm algorithm, long cpu_budget, | AutotuneAlgorithm algorithm, long cpu_budget, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ModelDataset", name, | "ModelDataset", name, | ||||
| null, | null, | ||||
| input_dataset, | input_dataset, | ||||
| @@ -249,9 +249,9 @@ namespace Tensorflow | |||||
| /// <returns>A tuple of `Tensor` objects (handle, deleter).</returns> | /// <returns>A tuple of `Tensor` objects (handle, deleter).</returns> | ||||
| public (Tensor, Tensor) anonymous_iterator_v2(TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | public (Tensor, Tensor) anonymous_iterator_v2(TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "AnonymousIteratorV2", name, | "AnonymousIteratorV2", name, | ||||
| null, | null, | ||||
| "output_types", output_types, | "output_types", output_types, | ||||
| @@ -271,9 +271,9 @@ namespace Tensorflow | |||||
| /// <returns>The created Operation.</returns> | /// <returns>The created Operation.</returns> | ||||
| public ITensorOrOperation make_iterator(Tensor dataset, Tensor iterator, string name = null) | public ITensorOrOperation make_iterator(Tensor dataset, Tensor iterator, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "MakeIterator", name, | "MakeIterator", name, | ||||
| null, | null, | ||||
| dataset, iterator); | dataset, iterator); | ||||
| @@ -292,9 +292,9 @@ namespace Tensorflow | |||||
| /// <returns>The created Operation.</returns> | /// <returns>The created Operation.</returns> | ||||
| public ITensorOrOperation delete_iterator(Tensor handle, Tensor deleter, string name = null) | public ITensorOrOperation delete_iterator(Tensor handle, Tensor deleter, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "DeleteIterator", name, | "DeleteIterator", name, | ||||
| null, | null, | ||||
| handle, deleter); | handle, deleter); | ||||
| @@ -314,9 +314,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | public Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "IteratorGetNext", name, | "IteratorGetNext", name, | ||||
| null, | null, | ||||
| iterator, | iterator, | ||||
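The dataset wrappers in this file are enough to wire a tiny input pipeline by hand when eager execution is on. A hedged sketch follows; the instance these methods hang off is written as `dataset_ops` purely for illustration, and the element shapes and types are chosen for a 1-D float source:

```csharp
// Sketch only: driving the raw dataset ops directly (eager mode assumed;
// "dataset_ops" is an assumed instance exposing the methods in this file).
var components = new[] { tf.constant(new float[] { 1f, 2f, 3f, 4f }) };
var elementTypes = new[] { TF_DataType.TF_FLOAT };
var elementShapes = new[] { new TensorShape(new int[0]) };   // scalar elements after slicing

var dataset = dataset_ops.tensor_slice_dataset(components, elementShapes);
dataset = dataset_ops.repeat_dataset(dataset, tf.constant(2L), elementTypes, elementShapes);

// Anonymous iterator: (handle, deleter); bind the dataset, pull elements, then clean up.
var (iterator, deleter) = dataset_ops.anonymous_iterator_v2(elementTypes, elementShapes);
dataset_ops.make_iterator(dataset, iterator);
var next = dataset_ops.iterator_get_next(iterator, elementTypes, elementShapes);
dataset_ops.delete_iterator(iterator, deleter);
```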
| @@ -60,7 +60,7 @@ namespace Tensorflow | |||||
| var elems_flat = input_flatten(elems); | var elems_flat = input_flatten(elems); | ||||
| bool in_graph_mode = tf.context.executing_eagerly(); | |||||
| bool in_graph_mode = tf.Context.executing_eagerly(); | |||||
| return tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope => | return tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope => | ||||
| { | { | ||||
| @@ -19,6 +19,7 @@ using System.Collections.Generic; | |||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using System.Linq; | using System.Linq; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -26,14 +27,14 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null) | public static Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor check_numerics(Tensor tensor, string message, string name = null) | public static Tensor check_numerics(Tensor tensor, string message, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -47,9 +48,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor concat_v2<T, Ta>(T[] values, Ta axis, string name = null) | public static Tensor concat_v2<T, Ta>(T[] values, Ta axis, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ConcatV2", name, | "ConcatV2", name, | ||||
| null, | null, | ||||
| values, axis); | values, axis); | ||||
| @@ -57,35 +58,35 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null) | public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| return concat_v2_eager_fallback(values, axis, name, tf.context); | |||||
| return concat_v2_eager_fallback(values, axis, name, tf.Context); | |||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| private static Tensor concat_v2_eager_fallback<T1, T2>(T1[] values, T2 axis, string name, Context ctx) | private static Tensor concat_v2_eager_fallback<T1, T2>(T1[] values, T2 axis, string name, Context ctx) | ||||
| { | { | ||||
| var _attr_N = len(values); | var _attr_N = len(values); | ||||
| var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: values.Select(x => (object)x).ToArray()); | |||||
| var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new object[] { axis }); | |||||
| var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: values.Select(x => (object)x).ToArray()); | |||||
| var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis }); | |||||
| var _inputs_flat = input.concat(axis1); | var _inputs_flat = input.concat(axis1); | ||||
| var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; | var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; | ||||
| return tf._execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0]; | |||||
| return tf.Runner.Execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0]; | |||||
| } | } | ||||
| public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) | public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| @@ -123,9 +124,9 @@ namespace Tensorflow | |||||
| /// </remarks> | /// </remarks> | ||||
| public static Tensor diag(Tensor diagonal, string name = null) | public static Tensor diag(Tensor diagonal, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Diag", name, | "Diag", name, | ||||
| null, | null, | ||||
| diagonal); | diagonal); | ||||
| @@ -133,16 +134,16 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var op = tf._op_def_lib._apply_op_helper("Diag", name: name, args: new { diagonal }); | |||||
| var op = tf.OpDefLib._apply_op_helper("Diag", name: name, args: new { diagonal }); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| public static Tensor expand_dims(Tensor input, int axis, string name = null) | public static Tensor expand_dims(Tensor input, int axis, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ExpandDims", name, | "ExpandDims", name, | ||||
| null, | null, | ||||
| input, tf.convert_to_tensor(axis)); | input, tf.convert_to_tensor(axis)); | ||||
| @@ -150,30 +151,30 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor gather_v2<T1, T2>(T1 @params, T2 indices, int axis, string name = null) | public static Tensor gather_v2<T1, T2>(T1 @params, T2 indices, int axis, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor pad(Tensor input, Tensor paddings, string name = null) | public static Tensor pad(Tensor input, Tensor paddings, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Pad", name: name, args: new { input, paddings }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Pad", name: name, args: new { input, paddings }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor pack(Tensor[] values, int axis = 0, string name = null) | public static Tensor pack(Tensor[] values, int axis = 0, string name = null) | ||||
| { | { | ||||
| if(tf.context.executing_eagerly()) | |||||
| if(tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Pack", name, | "Pack", name, | ||||
| null, | null, | ||||
| values, | values, | ||||
| @@ -181,23 +182,10 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Pack", name: name, args: new { values, axis }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) | |||||
| { | |||||
| var _op = tf._op_def_lib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape }); | |||||
| var _result = _op.outputs; | |||||
| var _inputs_flat = _op.inputs; | |||||
| var _attrs = new Dictionary<string, object>(); | |||||
| _attrs["dtype"] = _op.get_attr("dtype"); | |||||
| _attrs["shape"] = _op.get_attr("shape"); | |||||
| return new Tensor(_op, 0, dtype); | |||||
| } | |||||
| /// <summary> | /// <summary> | ||||
| /// An identity op that triggers an error if a gradient is requested. | /// An identity op that triggers an error if a gradient is requested. | ||||
| /// </summary> | /// </summary> | ||||
| @@ -226,7 +214,7 @@ namespace Tensorflow | |||||
| /// </remarks> | /// </remarks> | ||||
| public static Tensor prevent_gradient(Tensor input, string message = "", string name = null) | public static Tensor prevent_gradient(Tensor input, string message = "", string name = null) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("PreventGradient", name: name, args: new { input, message }); | |||||
| var op = tf.OpDefLib._apply_op_helper("PreventGradient", name: name, args: new { input, message }); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| @@ -237,9 +225,9 @@ namespace Tensorflow | |||||
| /// <param name="name"></param> | /// <param name="name"></param> | ||||
| public static Tensor identity(Tensor input, string name = null) | public static Tensor identity(Tensor input, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Identity", name, | "Identity", name, | ||||
| null, | null, | ||||
| input); | input); | ||||
| @@ -247,30 +235,30 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Identity", name, new { input }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Identity", name, new { input }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor invert_permutation(Tensor x, string name = null) | public static Tensor invert_permutation(Tensor x, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("InvertPermutation", name, new { x }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, new { x }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor log(Tensor x, string name = null) | public static Tensor log(Tensor x, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Log", name: name, args: new { x }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Log", name: name, args: new { x }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor rank(Tensor input, string name = null) | public static Tensor rank(Tensor input, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Rank", name, | "Rank", name, | ||||
| null, | null, | ||||
| input); | input); | ||||
| @@ -278,7 +266,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Rank", name: name, args: new { input }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Rank", name: name, args: new { input }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -292,9 +280,9 @@ namespace Tensorflow | |||||
| /// <returns>A `Tensor`. Has the same type as `value`.</returns> | /// <returns>A `Tensor`. Has the same type as `value`.</returns> | ||||
| public static Tensor fill<T>(Tensor dims, T value, string name = null) | public static Tensor fill<T>(Tensor dims, T value, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Fill", name, | "Fill", name, | ||||
| null, | null, | ||||
| dims, value); | dims, value); | ||||
| @@ -302,7 +290,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Fill", name, new { dims, value }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Fill", name, new { dims, value }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -315,9 +303,9 @@ namespace Tensorflow | |||||
| /// <returns>A tuple of `Tensor` objects (r0, r1).</returns> | /// <returns>A tuple of `Tensor` objects (r0, r1).</returns> | ||||
| public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") | public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "BroadcastGradientArgs", name, | "BroadcastGradientArgs", name, | ||||
| null, | null, | ||||
| s0,s1); | s0,s1); | ||||
| @@ -325,22 +313,22 @@ namespace Tensorflow | |||||
| return (results[0], results[1]); | return (results[0], results[1]); | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 }); | |||||
| return (_op.outputs[0], _op.outputs[1]); | return (_op.outputs[0], _op.outputs[1]); | ||||
| } | } | ||||
| public static Tensor reverse<T>(Tensor tensor, T axis, string name = null) | public static Tensor reverse<T>(Tensor tensor, T axis, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ReverseV2", name, new { tensor, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, new { tensor, axis }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null) | public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Reshape", name, | "Reshape", name, | ||||
| null, | null, | ||||
| tensor, shape); | tensor, shape); | ||||
| @@ -348,13 +336,13 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| public static Tensor reshape(Tensor tensor, int[] shape, string name = null) | public static Tensor reshape(Tensor tensor, int[] shape, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -367,7 +355,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) | public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Unique", name, new { x, out_idx }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Unique", name, new { x, out_idx }); | |||||
| // TODO | // TODO | ||||
| //var _result = _UniqueOutput._make(_op.outputs); | //var _result = _UniqueOutput._make(_op.outputs); | ||||
| return (_op.outputs[0], _op.outputs[1]); | return (_op.outputs[0], _op.outputs[1]); | ||||
| @@ -375,13 +363,13 @@ namespace Tensorflow | |||||
| public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null) | public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Unpack", name, new { value, num, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Unpack", name, new { value, num, axis }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| public static Tensor where(Tensor condition, string name = null) | public static Tensor where(Tensor condition, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Where", name, new { input = condition }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Where", name, new { input = condition }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -392,9 +380,9 @@ namespace Tensorflow | |||||
| int axis = -1, | int axis = -1, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "OneHot", name, | "OneHot", name, | ||||
| null, | null, | ||||
| indices, depth, on_value, off_value, | indices, depth, on_value, off_value, | ||||
| @@ -403,7 +391,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -416,15 +404,15 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null) | public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor select<Tx, Ty>(Tensor condition, Tx t, Ty e, string name = null) | public static Tensor select<Tx, Ty>(Tensor condition, Tx t, Ty e, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "SelectV2", name, | "SelectV2", name, | ||||
| null, | null, | ||||
| condition, t, e); | condition, t, e); | ||||
| @@ -432,21 +420,21 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Select", name, new { condition, t, e }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Select", name, new { condition, t, e }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null) | public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ScatterNd", name, new { indices, updates, shape }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, new { indices, updates, shape }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Shape", name, | "Shape", name, | ||||
| null, | null, | ||||
| input, | input, | ||||
| @@ -455,7 +443,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Shape", name, new { input, out_type }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Shape", name, new { input, out_type }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -468,13 +456,13 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ShapeN", name, new { input, out_type }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ShapeN", name, new { input, out_type }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Size", name, new { input, out_type }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Size", name, new { input, out_type }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -488,15 +476,15 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null) | public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor tile<T>(Tensor input, T multiples, string name = null) | public static Tensor tile<T>(Tensor input, T multiples, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Tile", name, | "Tile", name, | ||||
| null, | null, | ||||
| input, multiples); | input, multiples); | ||||
| @@ -504,34 +492,34 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Tile", name, new { input, multiples }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Tile", name, new { input, multiples }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor transpose<T1, T2>(T1 x, T2 perm, string name = null) | public static Tensor transpose<T1, T2>(T1 x, T2 perm, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Transpose", name, | "Transpose", name, | ||||
| null, | null, | ||||
| x, perm); | x, perm); | ||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Transpose", name, new { x, perm }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Transpose", name, new { x, perm }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor zeros_like(Tensor x, string name = null) | public static Tensor zeros_like(Tensor x, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ZerosLike", name, new { x }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ZerosLike", name, new { x }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor stop_gradient(Tensor x, string name = null) | public static Tensor stop_gradient(Tensor x, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("StopGradient", name, args: new { input = x, name }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, args: new { input = x, name }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -544,9 +532,9 @@ namespace Tensorflow | |||||
| int shrink_axis_mask = 0, | int shrink_axis_mask = 0, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "StridedSlice", name, | "StridedSlice", name, | ||||
| null, | null, | ||||
| input, begin, end, strides, | input, begin, end, strides, | ||||
| @@ -559,7 +547,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new | |||||
| { | { | ||||
| input, | input, | ||||
| begin, | begin, | ||||
| @@ -583,7 +571,7 @@ namespace Tensorflow | |||||
| int shrink_axis_mask = 0, | int shrink_axis_mask = 0, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new | |||||
| { | { | ||||
| input, | input, | ||||
| begin, | begin, | ||||
| @@ -623,7 +611,7 @@ namespace Tensorflow | |||||
| int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, | int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, | ||||
| int shrink_axis_mask = 0, string name = null) | int shrink_axis_mask = 0, string name = null) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("StridedSliceGrad", name: name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("StridedSliceGrad", name: name, args: new | |||||
| { | { | ||||
| shape, | shape, | ||||
| begin, | begin, | ||||
| @@ -642,7 +630,7 @@ namespace Tensorflow | |||||
| public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null) | public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -659,9 +647,9 @@ namespace Tensorflow | |||||
| /// <returns> A `Tensor`. Has the same type as `input`.</returns> | /// <returns> A `Tensor`. Has the same type as `input`.</returns> | ||||
| public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) | public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Squeeze", name, | "Squeeze", name, | ||||
| null, | null, | ||||
| input, | input, | ||||
| @@ -671,7 +659,7 @@ namespace Tensorflow | |||||
| } | } | ||||
| if (axis == null) axis = new int[0]; | if (axis == null) axis = new int[0]; | ||||
| var _op = tf._op_def_lib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -687,7 +675,7 @@ namespace Tensorflow | |||||
| /// <returns> `Tensor`. Has the same type as `s0`.</returns> | /// <returns> `Tensor`. Has the same type as `s0`.</returns> | ||||
| public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null) | public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -701,9 +689,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor broadcast_to<T>(Tensor input, T shape, string name = null) | public static Tensor broadcast_to<T>(Tensor input, T shape, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "BroadcastTo", name, | "BroadcastTo", name, | ||||
| null, | null, | ||||
| input, shape); | input, shape); | ||||
| @@ -711,7 +699,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
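With the rename in place, the `gen_array_ops` wrappers above are called the same way as before; the only difference is which internal members they route through (`tf.Context`, `tf.Runner`, `tf.OpDefLib`). A small sketch exercising a few of them under eager execution (shapes are passed as tensors to stay close to the signatures shown above):

```csharp
// Sketch: a few gen_array_ops wrappers invoked directly (eager mode assumed).
var dims = tf.constant(new[] { 2, 3 });
var filled = gen_array_ops.fill(dims, 1.5f);                              // (2, 3) filled with 1.5
var flat = gen_array_ops.reshape(filled, tf.constant(new[] { 6 }));       // (6,)
var wide = gen_array_ops.broadcast_to(flat, tf.constant(new[] { 4, 6 })); // (4, 6)
print(wide);
```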
| @@ -23,7 +23,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Operation control_trigger(string name = null) | public static Operation control_trigger(string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ControlTrigger", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ControlTrigger", name, new | |||||
| { | { | ||||
| }); | }); | ||||
| @@ -41,7 +41,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor enter(Tensor data, string frame_name = "frame_name", bool is_constant = false, int parallel_iterations = 10, string name = null) | public static Tensor enter(Tensor data, string frame_name = "frame_name", bool is_constant = false, int parallel_iterations = 10, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Enter", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Enter", name, new | |||||
| { | { | ||||
| data, | data, | ||||
| frame_name, | frame_name, | ||||
| @@ -60,7 +60,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor loop_cond(Tensor input, string name = null) | public static Tensor loop_cond(Tensor input, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("LoopCond", name, new { input }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("LoopCond", name, new { input }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -73,7 +73,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor ref_next_iteration(Tensor data, string name = null) | public static Tensor ref_next_iteration(Tensor data, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RefNextIteration", name, new { data }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("RefNextIteration", name, new { data }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| @@ -86,7 +86,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor next_iteration(Tensor data, string name = null) | public static Tensor next_iteration(Tensor data, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("NextIteration", name, new { data }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("NextIteration", name, new { data }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| @@ -99,7 +99,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor ref_exit(Tensor data, string name = null) | public static Tensor ref_exit(Tensor data, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RefExit", name, new { data }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("RefExit", name, new { data }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| @@ -112,21 +112,21 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor _exit(Tensor data, string name = null) | public static Tensor _exit(Tensor data, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Exit", name, new { data }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Exit", name, new { data }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| public static Operation no_op(string name = null) | public static Operation no_op(string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("NoOp", name, null); | |||||
| var _op = tf.OpDefLib._apply_op_helper("NoOp", name, null); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| public static Tensor[] ref_switch(Tensor data, Tensor pred, string name = null) | public static Tensor[] ref_switch(Tensor data, Tensor pred, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RefSwitch", name, new { data, pred }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("RefSwitch", name, new { data, pred }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| @@ -150,7 +150,7 @@ namespace Tensorflow | |||||
| /// </returns> | /// </returns> | ||||
| public static Tensor[] @switch(Tensor data, Tensor pred, string name = null) | public static Tensor[] @switch(Tensor data, Tensor pred, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Switch", name, new { data, pred }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Switch", name, new { data, pred }); | |||||
| var _inputs_flat = _op.inputs; | var _inputs_flat = _op.inputs; | ||||
| #pragma warning disable CS0219 // Variable is assigned but its value is never used | #pragma warning disable CS0219 // Variable is assigned but its value is never used | ||||
| var _attrs = ("T", _op.get_attr("T")); | var _attrs = ("T", _op.get_attr("T")); | ||||
| @@ -162,14 +162,14 @@ namespace Tensorflow | |||||
| public static MergeOutput ref_merge(Tensor[] inputs, string name = null) | public static MergeOutput ref_merge(Tensor[] inputs, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RefMerge", name, new { inputs }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("RefMerge", name, new { inputs }); | |||||
| return new MergeOutput(_op.outputs); | return new MergeOutput(_op.outputs); | ||||
| } | } | ||||
| public static MergeOutput merge(Tensor[] inputs, string name = null) | public static MergeOutput merge(Tensor[] inputs, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Merge", name, new { inputs }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Merge", name, new { inputs }); | |||||
| return new MergeOutput(_op.outputs); | return new MergeOutput(_op.outputs); | ||||
| } | } | ||||
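These are the graph-mode control-flow primitives that `cond` and `while_loop` are assembled from. A hedged sketch of the Switch/Merge pair is below; the owning static class is written as `gen_control_flow_ops`, and the `[output_false, output_true]` ordering of `Switch` follows upstream TensorFlow's convention rather than anything stated in this diff:

```csharp
// Sketch only: manual Switch/Merge wiring (graph mode assumed).
var pred = tf.placeholder(TF_DataType.TF_BOOL, name: "pred");
var data = tf.constant(3.0f);

// Switch forwards `data` to exactly one of its two outputs depending on `pred`.
var routed = gen_control_flow_ops.@switch(data, pred);   // assumed order: [false, true]

// Merge picks whichever input is "alive" and forwards it downstream.
var merged = gen_control_flow_ops.merge(routed);
```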
| @@ -22,7 +22,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder") | public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder") | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("CTCGreedyDecoder", name: name, args: new | |||||
| { | { | ||||
| inputs, | inputs, | ||||
| sequence_length, | sequence_length, | ||||
| @@ -22,7 +22,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null) | public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DynamicStitch", name, new { indices, data }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("DynamicStitch", name, new { indices, data }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
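`DynamicStitch` is the inverse of `DynamicPartition` (whose wrapper appears in the next hunk): partition scatters elements of `data` into `num_partitions` buckets according to `partitions`, and stitch reassembles them when given the partitioned positions. A hedged sketch (the owning class is written as `gen_data_flow_ops`, following TF.NET's usual layout):

```csharp
// Sketch: partition elements into two buckets, then stitch them back together.
var data = tf.constant(new float[] { 10f, 20f, 30f, 40f });
var partitions = tf.constant(new[] { 0, 1, 0, 1 });                  // bucket id per element
var parts = gen_data_flow_ops.dynamic_partition(data, partitions, num_partitions: 2);

// Partition the original positions the same way, then stitch to restore the order.
var positions = tf.constant(new[] { 0, 1, 2, 3 });
var idx = gen_data_flow_ops.dynamic_partition(positions, partitions, num_partitions: 2);
var restored = gen_data_flow_ops.dynamic_stitch(idx, parts);         // equals `data`
```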
| @@ -30,7 +30,7 @@ namespace Tensorflow | |||||
| public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions, | public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DynamicPartition", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("DynamicPartition", name, new | |||||
| { | { | ||||
| data, | data, | ||||
| partitions, | partitions, | ||||
| @@ -44,7 +44,7 @@ namespace Tensorflow | |||||
| TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true, | TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true, | ||||
| bool identical_element_shapes = false, string tensor_array_name = "", string name = null) | bool identical_element_shapes = false, string tensor_array_name = "", string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArrayV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArrayV3", name, new | |||||
| { | { | ||||
| size, | size, | ||||
| dtype, | dtype, | ||||
| @@ -61,7 +61,7 @@ namespace Tensorflow | |||||
| public static Tensor tensor_array_scatter_v3(Tensor handle, Tensor indices, Tensor value, | public static Tensor tensor_array_scatter_v3(Tensor handle, Tensor indices, Tensor value, | ||||
| Tensor flow_in, string name = null) | Tensor flow_in, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArrayScatterV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArrayScatterV3", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| indices, | indices, | ||||
| @@ -76,7 +76,7 @@ namespace Tensorflow | |||||
| int capacity = -1, string container = "", string shared_name = "", | int capacity = -1, string container = "", string shared_name = "", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("PaddingFIFOQueueV2", name, new | |||||
| { | { | ||||
| component_types, | component_types, | ||||
| shapes, | shapes, | ||||
| @@ -92,7 +92,7 @@ namespace Tensorflow | |||||
| int capacity = -1, string container = "", string shared_name = "", | int capacity = -1, string container = "", string shared_name = "", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("FIFOQueueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("FIFOQueueV2", name, new | |||||
| { | { | ||||
| component_types, | component_types, | ||||
| shapes, | shapes, | ||||
| @@ -108,7 +108,7 @@ namespace Tensorflow | |||||
| int capacity = -1, string container = "", string shared_name = "", | int capacity = -1, string container = "", string shared_name = "", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("PriorityQueueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("PriorityQueueV2", name, new | |||||
| { | { | ||||
| component_types, | component_types, | ||||
| shapes, | shapes, | ||||
| @@ -124,7 +124,7 @@ namespace Tensorflow | |||||
| int capacity = -1, int min_after_dequeue = 0, int seed = 0, int seed2 = 0, | int capacity = -1, int min_after_dequeue = 0, int seed = 0, int seed2 = 0, | ||||
| string container = "", string shared_name = "", string name = null) | string container = "", string shared_name = "", string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RandomShuffleQueueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("RandomShuffleQueueV2", name, new | |||||
| { | { | ||||
| component_types, | component_types, | ||||
| shapes, | shapes, | ||||
| @@ -141,7 +141,7 @@ namespace Tensorflow | |||||
| public static Operation queue_enqueue(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | public static Operation queue_enqueue(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueEnqueue", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueEnqueue", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| components, | components, | ||||
| @@ -153,7 +153,7 @@ namespace Tensorflow | |||||
| public static Operation queue_enqueue_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | public static Operation queue_enqueue_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueEnqueueV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| components, | components, | ||||
| @@ -165,7 +165,7 @@ namespace Tensorflow | |||||
| public static Tensor[] queue_dequeue_v2(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | public static Tensor[] queue_dequeue_v2(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueDequeueV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueDequeueV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| component_types, | component_types, | ||||
| @@ -177,7 +177,7 @@ namespace Tensorflow | |||||
| public static Tensor[] queue_dequeue(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | public static Tensor[] queue_dequeue(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueDequeue", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueDequeue", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| component_types, | component_types, | ||||
| @@ -189,7 +189,7 @@ namespace Tensorflow | |||||
| public static Operation queue_enqueue_many_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | public static Operation queue_enqueue_many_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueManyV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueEnqueueManyV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| components, | components, | ||||
| @@ -201,7 +201,7 @@ namespace Tensorflow | |||||
| public static Tensor[] queue_dequeue_many_v2(Tensor handle, int n, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | public static Tensor[] queue_dequeue_many_v2(Tensor handle, int n, TF_DataType[] component_types, int timeout_ms = -1, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("QueueDequeueManyV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("QueueDequeueManyV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| n, | n, | ||||
| @@ -223,7 +223,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor tensor_array_read_v3(Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = null) | public static Tensor tensor_array_read_v3(Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArrayReadV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArrayReadV3", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| index, | index, | ||||
| @@ -236,7 +236,7 @@ namespace Tensorflow | |||||
| public static Tensor tensor_array_write_v3(Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = null) | public static Tensor tensor_array_write_v3(Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArrayWriteV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArrayWriteV3", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| index, | index, | ||||
| @@ -249,7 +249,7 @@ namespace Tensorflow | |||||
| public static Tensor tensor_array_size_v3(Tensor handle, Tensor flow_in, string name = null) | public static Tensor tensor_array_size_v3(Tensor handle, Tensor flow_in, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArraySizeV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArraySizeV3", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| flow_in | flow_in | ||||
| @@ -261,7 +261,7 @@ namespace Tensorflow | |||||
| public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in, | public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in, | ||||
| TF_DataType dtype, TensorShape element_shape = null, string name = null) | TF_DataType dtype, TensorShape element_shape = null, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("TensorArrayGatherV3", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("TensorArrayGatherV3", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| indices, | indices, | ||||
| @@ -276,7 +276,7 @@ namespace Tensorflow | |||||
| public static Tensor stack_v2(Tensor max_size, TF_DataType elem_type, string stack_name = "", | public static Tensor stack_v2(Tensor max_size, TF_DataType elem_type, string stack_name = "", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("StackV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("StackV2", name, new | |||||
| { | { | ||||
| max_size, | max_size, | ||||
| elem_type, | elem_type, | ||||
| @@ -289,7 +289,7 @@ namespace Tensorflow | |||||
| public static Tensor stack_push_v2(Tensor handle, Tensor elem, bool swap_memory = false, | public static Tensor stack_push_v2(Tensor handle, Tensor elem, bool swap_memory = false, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("StackPushV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("StackPushV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| elem, | elem, | ||||
| @@ -301,7 +301,7 @@ namespace Tensorflow | |||||
| public static Tensor stack_pop_v2(Tensor handle, TF_DataType elem_type, string name = null) | public static Tensor stack_pop_v2(Tensor handle, TF_DataType elem_type, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("StackPopV2", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("StackPopV2", name, new | |||||
| { | { | ||||
| handle, | handle, | ||||
| elem_type | elem_type | ||||
| @@ -65,13 +65,13 @@ namespace Tensorflow | |||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| // Add nodes to the TensorFlow graph. | // Add nodes to the TensorFlow graph. | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException("decode_jpeg"); | throw new NotImplementedException("decode_jpeg"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DecodeJpeg", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("DecodeJpeg", name: name, args: new | |||||
| { | { | ||||
| contents, | contents, | ||||
| channels, | channels, | ||||
| @@ -90,13 +90,13 @@ namespace Tensorflow | |||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| // Add nodes to the TensorFlow graph. | // Add nodes to the TensorFlow graph. | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException("decode_gif"); | throw new NotImplementedException("decode_gif"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DecodeGif", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("DecodeGif", name: name, args: new | |||||
| { | { | ||||
| contents | contents | ||||
| }); | }); | ||||
| @@ -111,13 +111,13 @@ namespace Tensorflow | |||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| // Add nodes to the TensorFlow graph. | // Add nodes to the TensorFlow graph. | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException("decode_png"); | throw new NotImplementedException("decode_png"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DecodePng", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("DecodePng", name: name, args: new | |||||
| { | { | ||||
| contents, | contents, | ||||
| channels, | channels, | ||||
| @@ -133,13 +133,13 @@ namespace Tensorflow | |||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| // Add nodes to the TensorFlow graph. | // Add nodes to the TensorFlow graph. | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException("decode_bmp"); | throw new NotImplementedException("decode_bmp"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("DecodeBmp", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("DecodeBmp", name: name, args: new | |||||
| { | { | ||||
| contents, | contents, | ||||
| channels | channels | ||||
| @@ -151,13 +151,13 @@ namespace Tensorflow | |||||
| public static Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null) | public static Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| throw new NotImplementedException("resize_bilinear"); | throw new NotImplementedException("resize_bilinear"); | ||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ResizeBilinear", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ResizeBilinear", name: name, args: new | |||||
| { | { | ||||
| images, | images, | ||||
| size, | size, | ||||
| @@ -171,7 +171,7 @@ namespace Tensorflow | |||||
| public static Tensor resize_nearest_neighbor<Tsize>(Tensor images, Tsize size, bool align_corners = false, | public static Tensor resize_nearest_neighbor<Tsize>(Tensor images, Tsize size, bool align_corners = false, | ||||
| bool half_pixel_centers = false, string name = null) | bool half_pixel_centers = false, string name = null) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new | |||||
| { | { | ||||
| images, | images, | ||||
| size, | size, | ||||
| @@ -185,7 +185,7 @@ namespace Tensorflow | |||||
| public static Tensor resize_nearest_neighbor_grad<Tsize>(Tensor grads, Tsize size, bool align_corners = false, | public static Tensor resize_nearest_neighbor_grad<Tsize>(Tensor grads, Tsize size, bool align_corners = false, | ||||
| bool half_pixel_centers = false, string name = null) | bool half_pixel_centers = false, string name = null) | ||||
| { | { | ||||
| var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new | |||||
| var op = tf.OpDefLib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new | |||||
| { | { | ||||
| grads, | grads, | ||||
| size, | size, | ||||
| @@ -26,7 +26,7 @@ namespace Tensorflow | |||||
| if (!summarize.HasValue) | if (!summarize.HasValue) | ||||
| summarize = 3; | summarize = 3; | ||||
| var _op = tf._op_def_lib._apply_op_helper("Assert", name, args: new { condition, data, summarize }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Assert", name, args: new { condition, data, summarize }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| @@ -34,7 +34,7 @@ namespace Tensorflow | |||||
| public static Tensor histogram_summary(string tag, Tensor values, string name = null) | public static Tensor histogram_summary(string tag, Tensor values, string name = null) | ||||
| { | { | ||||
| var dict = new Dictionary<string, object>(); | var dict = new Dictionary<string, object>(); | ||||
| var op = tf._op_def_lib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values }); | |||||
| var op = tf.OpDefLib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values }); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| @@ -63,7 +63,7 @@ namespace Tensorflow | |||||
| var dict = new Dictionary<string, object>(); | var dict = new Dictionary<string, object>(); | ||||
| dict["tags"] = tags; | dict["tags"] = tags; | ||||
| dict["values"] = values; | dict["values"] = values; | ||||
| var op = tf._op_def_lib._apply_op_helper("ScalarSummary", name: name, keywords: dict); | |||||
| var op = tf.OpDefLib._apply_op_helper("ScalarSummary", name: name, keywords: dict); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| @@ -94,7 +94,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| var dict = new Dictionary<string, object>(); | var dict = new Dictionary<string, object>(); | ||||
| dict["inputs"] = inputs; | dict["inputs"] = inputs; | ||||
| var op = tf._op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict); | |||||
| var op = tf.OpDefLib._apply_op_helper("MergeSummary", name: name, keywords: dict); | |||||
| return op.output; | return op.output; | ||||
| } | } | ||||
| } | } | ||||
| @@ -11,7 +11,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor mul(IntPtr x, IntPtr y, string name = null) | public static Tensor mul(IntPtr x, IntPtr y, string name = null) | ||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Mul", name, | "Mul", name, | ||||
| null, | null, | ||||
| x, y); | x, y); | ||||
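These eager-aware wrappers follow a dual-dispatch pattern: when `tf.Context.executing_eagerly()` is true they call `tf.Runner.TFE_FastPathExecute` with `tf.Context` and `tf.Context.DeviceName`, otherwise they fall back to building a graph node through `tf.OpDefLib`. A condensed sketch of that pattern (parameter types simplified to `Tensor` for illustration; the real `mul` overload above takes `IntPtr` handles):

```csharp
// Sketch only: mirrors the dispatch used by the generated wrappers in this diff.
public static Tensor mul(Tensor x, Tensor y, string name = null)
{
    if (tf.Context.executing_eagerly())
    {
        // Eager fast path: run the kernel immediately on the current device.
        var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
            "Mul", name,
            null,
            x, y);
        return results[0];
    }

    // Graph mode: add a "Mul" node to the default graph and return its output.
    var _op = tf.OpDefLib._apply_op_helper("Mul", name, new { x, y });
    return _op.output;
}
```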
| @@ -38,9 +38,9 @@ namespace Tensorflow | |||||
| if (!seed2.HasValue) | if (!seed2.HasValue) | ||||
| seed2 = 0; | seed2 = 0; | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "RandomStandardNormal", name, | "RandomStandardNormal", name, | ||||
| null, | null, | ||||
| shape, | shape, | ||||
| @@ -51,7 +51,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("RandomStandardNormal", | |||||
| var _op = tf.OpDefLib._apply_op_helper("RandomStandardNormal", | |||||
| name: name, | name: name, | ||||
| args: new { shape, dtype, seed, seed2 }); | args: new { shape, dtype, seed, seed2 }); | ||||
| @@ -75,7 +75,7 @@ namespace Tensorflow | |||||
| if (!seed2.HasValue) | if (!seed2.HasValue) | ||||
| seed2 = 0; | seed2 = 0; | ||||
| var _op = tf._op_def_lib._apply_op_helper("RandomUniformInt", | |||||
| var _op = tf.OpDefLib._apply_op_helper("RandomUniformInt", | |||||
| name: name, | name: name, | ||||
| args: new { shape, minval, maxval, seed, seed2 }); | args: new { shape, minval, maxval, seed, seed2 }); | ||||
| @@ -98,7 +98,7 @@ namespace Tensorflow | |||||
| if (!seed2.HasValue) | if (!seed2.HasValue) | ||||
| seed2 = 0; | seed2 = 0; | ||||
| var _op = tf._op_def_lib._apply_op_helper("RandomUniform", | |||||
| var _op = tf.OpDefLib._apply_op_helper("RandomUniform", | |||||
| name: name, | name: name, | ||||
| args: new { shape, dtype, seed, seed2}); | args: new { shape, dtype, seed, seed2}); | ||||
| @@ -116,7 +116,7 @@ namespace Tensorflow | |||||
| public static Tensor random_shuffle(Tensor value, int seed = 0, int seed2 = 0, | public static Tensor random_shuffle(Tensor value, int seed = 0, int seed2 = 0, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RandomShuffle", | |||||
| var _op = tf.OpDefLib._apply_op_helper("RandomShuffle", | |||||
| name: name, | name: name, | ||||
| args: new { value, seed, seed2 }); | args: new { value, seed, seed2 }); | ||||
| @@ -140,9 +140,9 @@ namespace Tensorflow | |||||
| if (!seed2.HasValue) | if (!seed2.HasValue) | ||||
| seed2 = 0; | seed2 = 0; | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "TruncatedNormal", name, | "TruncatedNormal", name, | ||||
| null, | null, | ||||
| shape, | shape, | ||||
| @@ -153,7 +153,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("TruncatedNormal", | |||||
| var _op = tf.OpDefLib._apply_op_helper("TruncatedNormal", | |||||
| name: name, | name: name, | ||||
| args: new { shape, dtype, seed, seed2 }); | args: new { shape, dtype, seed, seed2 }); | ||||
| @@ -170,7 +170,7 @@ namespace Tensorflow | |||||
| if (output_dtype == TF_DataType.DtInvalid) | if (output_dtype == TF_DataType.DtInvalid) | ||||
| output_dtype = TF_DataType.TF_INT64; | output_dtype = TF_DataType.TF_INT64; | ||||
| var _op = tf._op_def_lib._apply_op_helper("Multinomial", | |||||
| var _op = tf.OpDefLib._apply_op_helper("Multinomial", | |||||
| name: name, | name: name, | ||||
| args: new { logits, num_samples, seed, seed2, output_dtype }); | args: new { logits, num_samples, seed, seed2, output_dtype }); | ||||
| @@ -25,9 +25,9 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null) | public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "AssignSubVariableOp", name, | "AssignSubVariableOp", name, | ||||
| null, | null, | ||||
| resource, value); | resource, value); | ||||
| @@ -47,9 +47,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null) | public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "AssignAddVariableOp", name, | "AssignAddVariableOp", name, | ||||
| null, | null, | ||||
| resource, value); | resource, value); | ||||
| @@ -57,16 +57,16 @@ namespace Tensorflow | |||||
| return null; | return null; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("AssignAddVariableOp", name, new { resource, value }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name, new { resource, value }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null) | public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "AssignVariableOp", name, | "AssignVariableOp", name, | ||||
| null, | null, | ||||
| resource, value); | resource, value); | ||||
| @@ -74,16 +74,16 @@ namespace Tensorflow | |||||
| return null; | return null; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("AssignVariableOp", name, new { resource, value }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, new { resource, value }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| public static Tensor var_is_initialized_op(Tensor resource, string name = null) | public static Tensor var_is_initialized_op(Tensor resource, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "VarIsInitializedOp", name, | "VarIsInitializedOp", name, | ||||
| null, | null, | ||||
| resource); | resource); | ||||
| @@ -91,7 +91,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("VarIsInitializedOp", name, new { resource }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("VarIsInitializedOp", name, new { resource }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| @@ -108,9 +108,9 @@ namespace Tensorflow | |||||
| public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, | public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, | ||||
| string container ="", string shared_name = "", string name = null) | string container ="", string shared_name = "", string name = null) | ||||
| { | { | ||||
| if(tf.context.executing_eagerly()) | |||||
| if(tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "VarHandleOp", name, | "VarHandleOp", name, | ||||
| null, | null, | ||||
| "container", container, | "container", container, | ||||
| @@ -121,7 +121,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("VarHandleOp", name, new { | |||||
| var _op = tf.OpDefLib._apply_op_helper("VarHandleOp", name, new { | |||||
| dtype, | dtype, | ||||
| shape, | shape, | ||||
| container, | container, | ||||
| @@ -140,9 +140,9 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string name = null) | public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ReadVariableOp", name, | "ReadVariableOp", name, | ||||
| null, | null, | ||||
| resource, | resource, | ||||
| @@ -151,7 +151,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ReadVariableOp", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ReadVariableOp", name, new | |||||
| { | { | ||||
| resource, | resource, | ||||
| dtype | dtype | ||||
| @@ -163,7 +163,7 @@ namespace Tensorflow | |||||
| public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype, | public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype, | ||||
| int batch_dims = 0, bool validate_indices = true, string name = null) | int batch_dims = 0, bool validate_indices = true, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ResourceGather", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ResourceGather", name, new | |||||
| { | { | ||||
| resource, | resource, | ||||
| indices, | indices, | ||||
| @@ -38,7 +38,7 @@ namespace Tensorflow | |||||
| bool validate_indices = true, | bool validate_indices = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("SparseToDense", name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("SparseToDense", name, args: new | |||||
| { | { | ||||
| sparse_indices, | sparse_indices, | ||||
| output_shape, | output_shape, | ||||
| @@ -57,7 +57,7 @@ namespace Tensorflow | |||||
| bool validate_indices = true, | bool validate_indices = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("SparseToDense", name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("SparseToDense", name, args: new | |||||
| { | { | ||||
| sparse_indices, | sparse_indices, | ||||
| output_shape, | output_shape, | ||||
| @@ -112,7 +112,7 @@ namespace Tensorflow | |||||
| public static Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method, float extrapolation_value, string name) | public static Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method, float extrapolation_value, string name) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("CropAndResize", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("CropAndResize", name: name, args: new | |||||
| { | { | ||||
| image, | image, | ||||
| boxes, | boxes, | ||||
| @@ -14,6 +14,7 @@ | |||||
| limitations under the License. | limitations under the License. | ||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using Tensorflow.Contexts; | |||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| @@ -23,26 +24,26 @@ namespace Tensorflow | |||||
| { | { | ||||
| public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name = null) | public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("SaveV2", name: name, args: new { prefix, tensor_names, shape_and_slices, tensors }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("SaveV2", name: name, args: new { prefix, tensor_names, shape_and_slices, tensors }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| public Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = null) | public Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("RestoreV2", name: name, args: new { prefix, tensor_names, shape_and_slices, dtypes }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name: name, args: new { prefix, tensor_names, shape_and_slices, dtypes }); | |||||
| return _op.outputs; | return _op.outputs; | ||||
| } | } | ||||
| public Tensor read_file<T>(T filename, string name = null) | public Tensor read_file<T>(T filename, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| return read_file_eager_fallback(filename, name: name, tf.context); | |||||
| return read_file_eager_fallback(filename, name: name, tf.Context); | |||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ReadFile", name: name, args: new { filename }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -52,7 +53,7 @@ namespace Tensorflow | |||||
| var filename_tensor = ops.convert_to_tensor(filename, TF_DataType.TF_STRING); | var filename_tensor = ops.convert_to_tensor(filename, TF_DataType.TF_STRING); | ||||
| var _inputs_flat = new[] { filename_tensor }; | var _inputs_flat = new[] { filename_tensor }; | ||||
| return tf._execute.execute(ctx, "ReadFile", 1, _inputs_flat, null, name: name)[0]; | |||||
| return tf.Runner.Execute(ctx, "ReadFile", 1, _inputs_flat, null, name: name)[0]; | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
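The eager fallback path now routes through `tf.Runner.Execute` instead of the old `tf._execute.execute`. A minimal sketch of the renamed fallback, assuming the `Context` type from `Tensorflow.Contexts` as imported above (the `_sketch` suffix marks this as illustrative, not part of the changeset):

```csharp
// Mirrors read_file_eager_fallback above, written against the renamed runner.
private static Tensor read_file_eager_fallback_sketch(string filename, string name, Context ctx)
{
    var filename_tensor = ops.convert_to_tensor(filename, TF_DataType.TF_STRING);
    var _inputs_flat = new[] { filename_tensor };

    // tf.Runner.Execute replaces tf._execute.execute in 0.20.x.
    return tf.Runner.Execute(ctx, "ReadFile", 1, _inputs_flat, null, name: name)[0];
}
```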
| @@ -348,7 +348,7 @@ namespace Tensorflow | |||||
| /// <returns>A 1-D Tensor, the output shape as if keepdims were set to True.</returns> | /// <returns>A 1-D Tensor, the output shape as if keepdims were set to True.</returns> | ||||
| public static Tensor reduced_shape(Tensor input_shape, Tensor axes) | public static Tensor reduced_shape(Tensor input_shape, Tensor axes) | ||||
| { | { | ||||
| if(tf.context.executing_eagerly()) | |||||
| if(tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var input_shape_val = input_shape.numpy(); | var input_shape_val = input_shape.numpy(); | ||||
| var axes_val = (int)axes.numpy(); | var axes_val = (int)axes.numpy(); | ||||
| @@ -35,10 +35,10 @@ namespace Tensorflow | |||||
| public Tensor substr<T>(T input, int pos, int len, | public Tensor substr<T>(T input, int pos, int len, | ||||
| string @uint = "BYTE", string name = null) | string @uint = "BYTE", string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var input_tensor = tf.constant(input); | var input_tensor = tf.constant(input); | ||||
| var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "Substr", name, | "Substr", name, | ||||
| null, | null, | ||||
| input, pos, len, | input, pos, len, | ||||
| @@ -47,7 +47,7 @@ namespace Tensorflow | |||||
| return results[0]; | return results[0]; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("Substr", name: name, args: new | |||||
| var _op = tf.OpDefLib._apply_op_helper("Substr", name: name, args: new | |||||
| { | { | ||||
| input, | input, | ||||
| pos, | pos, | ||||
| @@ -235,7 +235,7 @@ namespace Tensorflow | |||||
| // Ensure any changes to the graph are reflected in the runtime. | // Ensure any changes to the graph are reflected in the runtime. | ||||
| _extend_graph(); | _extend_graph(); | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| var output_values = fetch_list.Select(x => IntPtr.Zero).ToArray(); | var output_values = fetch_list.Select(x => IntPtr.Zero).ToArray(); | ||||
| @@ -21,7 +21,11 @@ Building, training and infering deep learning models. | |||||
| https://tensorflownet.readthedocs.io</Description> | https://tensorflownet.readthedocs.io</Description> | ||||
| <AssemblyVersion>0.20.0.0</AssemblyVersion> | <AssemblyVersion>0.20.0.0</AssemblyVersion> | ||||
| <PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x. | <PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x. | ||||
| Eager Mode is added finally. | |||||
| * Eager Mode is finally added. | |||||
| * tf.keras is partially working. | |||||
| * tf.data is added. | |||||
| It's not stable at this moment and missing many APIs, tf.net 0.15.x is more stable for production. | It's not stable at this moment and missing many APIs, tf.net 0.15.x is more stable for production. | ||||
| Please be patient, we're working hard on missing functions, providing full tensorflow binding is our mission.</PackageReleaseNotes> | Please be patient, we're working hard on missing functions, providing full tensorflow binding is our mission.</PackageReleaseNotes> | ||||
| <FileVersion>0.20.0.0</FileVersion> | <FileVersion>0.20.0.0</FileVersion> | ||||
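Since the release notes now advertise eager execution, here is a minimal usage sketch against tf.net 0.20 (the printed values are illustrative):

```csharp
using static Tensorflow.Binding;

// Eager mode is the default in tf.net 0.20.x (tensorflow native 2.x),
// so operations run immediately without building a Session.
var a = tf.constant(2.0f);
var b = tf.constant(3.0f);
var c = a * b;

print(c);          // prints the eager tensor, e.g. numpy=6
print(c.numpy());  // 6
```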
| @@ -15,14 +15,14 @@ namespace Tensorflow | |||||
| get | get | ||||
| { | { | ||||
| using var _ = EagerTensorHandle.Lease(); | using var _ = EagerTensorHandle.Lease(); | ||||
| return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.status.Handle)); | |||||
| return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.Status.Handle)); | |||||
| } | } | ||||
| } | } | ||||
| public EagerTensorV2(IntPtr handle) | public EagerTensorV2(IntPtr handle) | ||||
| { | { | ||||
| EagerTensorHandle = c_api.TFE_EagerTensorHandle(handle); | EagerTensorHandle = c_api.TFE_EagerTensorHandle(handle); | ||||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.status.Handle); | |||||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle); | |||||
| } | } | ||||
| public unsafe EagerTensorV2(NDArray nd, string device_name = "") | public unsafe EagerTensorV2(NDArray nd, string device_name = "") | ||||
| @@ -42,7 +42,7 @@ namespace Tensorflow | |||||
| }, IntPtr.Zero); | }, IntPtr.Zero); | ||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle); | |||||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||||
| } | } | ||||
| /*public unsafe EagerTensorV2(float[,] value) | /*public unsafe EagerTensorV2(float[,] value) | ||||
| @@ -71,8 +71,8 @@ namespace Tensorflow | |||||
| IntPtr stringStartAddress = IntPtr.Zero; | IntPtr stringStartAddress = IntPtr.Zero; | ||||
| ulong dstLen = 0; | ulong dstLen = 0; | ||||
| c_api.TF_StringDecode((byte*) this.buffer + 8, this.bytesize, (byte**) &stringStartAddress, ref dstLen, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| c_api.TF_StringDecode((byte*) this.buffer + 8, this.bytesize, (byte**) &stringStartAddress, ref dstLen, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| var dstLenInt = checked((int) dstLen); | var dstLenInt = checked((int) dstLen); | ||||
| var value = Encoding.UTF8.GetString((byte*) stringStartAddress, dstLenInt); | var value = Encoding.UTF8.GetString((byte*) stringStartAddress, dstLenInt); | ||||
| @@ -459,9 +459,9 @@ namespace Tensorflow | |||||
| IntPtr tensor = c_api.TF_TensorData(handle); | IntPtr tensor = c_api.TF_TensorData(handle); | ||||
| Marshal.WriteInt64(tensor, 0); | Marshal.WriteInt64(tensor, 0); | ||||
| fixed (byte* src = buffer) | fixed (byte* src = buffer) | ||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*)(tensor + sizeof(long)), size, tf.status.Handle); | |||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*)(tensor + sizeof(long)), size, tf.Status.Handle); | |||||
| _handle = handle; | _handle = handle; | ||||
| tf.status.Check(true); | |||||
| tf.Status.Check(true); | |||||
| } | } | ||||
| public unsafe Tensor(string[] strings) | public unsafe Tensor(string[] strings) | ||||
| @@ -490,8 +490,8 @@ namespace Tensorflow | |||||
| Marshal.WriteInt64(input, i * sizeof(ulong), (long)offset); | Marshal.WriteInt64(input, i * sizeof(ulong), (long)offset); | ||||
| fixed (byte* src = &buffer[i][0]) | fixed (byte* src = &buffer[i][0]) | ||||
| { | { | ||||
| var written = TF_StringEncode(src, (ulong)buffer[i].Length, (byte*)data_start, (ulong)(limit.ToInt64() - data_start.ToInt64()), tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| var written = TF_StringEncode(src, (ulong)buffer[i].Length, (byte*)data_start, (ulong)(limit.ToInt64() - data_start.ToInt64()), tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| //input += 8; | //input += 8; | ||||
| data_start += (int)written; | data_start += (int)written; | ||||
| offset += written; | offset += written; | ||||
| @@ -519,8 +519,8 @@ namespace Tensorflow | |||||
| IntPtr tensor = c_api.TF_TensorData(handle); | IntPtr tensor = c_api.TF_TensorData(handle); | ||||
| Marshal.WriteInt64(tensor, 0); | Marshal.WriteInt64(tensor, 0); | ||||
| c_api.TF_StringEncode((byte*) nd.Unsafe.Address, bytesLength, (byte*) (tensor + sizeof(long)), size, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| c_api.TF_StringEncode((byte*) nd.Unsafe.Address, bytesLength, (byte*) (tensor + sizeof(long)), size, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| _handle = handle; | _handle = handle; | ||||
| } else | } else | ||||
| { | { | ||||
| @@ -533,9 +533,9 @@ namespace Tensorflow | |||||
| Marshal.WriteInt64(tensor, 0); | Marshal.WriteInt64(tensor, 0); | ||||
| fixed (byte* src = buffer) | fixed (byte* src = buffer) | ||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*) (tensor + sizeof(Int64)), size, tf.status.Handle); | |||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*) (tensor + sizeof(Int64)), size, tf.Status.Handle); | |||||
| tf.status.Check(true); | |||||
| tf.Status.Check(true); | |||||
| _handle = handle; | _handle = handle; | ||||
| } | } | ||||
| @@ -610,9 +610,9 @@ namespace Tensorflow | |||||
| Marshal.WriteInt64(tensor, 0); | Marshal.WriteInt64(tensor, 0); | ||||
| fixed (byte* src = buffer) | fixed (byte* src = buffer) | ||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*)(tensor + sizeof(long)), size, tf.status.Handle); | |||||
| c_api.TF_StringEncode(src, (ulong)buffer.Length, (byte*)(tensor + sizeof(long)), size, tf.Status.Handle); | |||||
| tf.status.Check(true); | |||||
| tf.Status.Check(true); | |||||
| return handle; | return handle; | ||||
| } | } | ||||
| @@ -240,8 +240,8 @@ namespace Tensorflow | |||||
| { | { | ||||
| IntPtr dst = IntPtr.Zero; | IntPtr dst = IntPtr.Zero; | ||||
| ulong dstLen = 0; | ulong dstLen = 0; | ||||
| var read = c_api.TF_StringDecode((byte*)src, bytesize, (byte**)&dst, ref dstLen, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| var read = c_api.TF_StringDecode((byte*)src, bytesize, (byte**)&dst, ref dstLen, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| buffer[i] = new byte[(int)dstLen]; | buffer[i] = new byte[(int)dstLen]; | ||||
| Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | ||||
| src += (int)read; | src += (int)read; | ||||
| @@ -274,8 +274,8 @@ namespace Tensorflow | |||||
| { | { | ||||
| IntPtr dst = IntPtr.Zero; | IntPtr dst = IntPtr.Zero; | ||||
| ulong dstLen = 0; | ulong dstLen = 0; | ||||
| var read = c_api.TF_StringDecode((byte*)src, bytesize, (byte**)&dst, ref dstLen, tf.status.Handle); | |||||
| tf.status.Check(true); | |||||
| var read = c_api.TF_StringDecode((byte*)src, bytesize, (byte**)&dst, ref dstLen, tf.Status.Handle); | |||||
| tf.Status.Check(true); | |||||
| buffer[i] = new byte[(int)dstLen]; | buffer[i] = new byte[(int)dstLen]; | ||||
| Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | ||||
| src += (int)read; | src += (int)read; | ||||
| @@ -109,7 +109,7 @@ namespace Tensorflow | |||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, tf.status.Handle); | |||||
| c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, tf.Status.Handle); | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| @@ -123,11 +123,11 @@ namespace Tensorflow | |||||
| set | set | ||||
| { | { | ||||
| if (value == null) | if (value == null) | ||||
| c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), null, -1, tf.status.Handle); | |||||
| c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), null, -1, tf.Status.Handle); | |||||
| else | else | ||||
| c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, tf.status.Handle); | |||||
| c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), value.Select(Convert.ToInt64).ToArray(), value.Length, tf.Status.Handle); | |||||
| tf.status.Check(true); | |||||
| tf.Status.Check(true); | |||||
| } | } | ||||
| } | } | ||||
| @@ -172,7 +172,7 @@ namespace Tensorflow | |||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| var output = _as_tf_output(); | var output = _as_tf_output(); | ||||
| int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, tf.status.Handle); | |||||
| int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, tf.Status.Handle); | |||||
| return ndim; | return ndim; | ||||
| } | } | ||||
| @@ -232,11 +232,11 @@ namespace Tensorflow | |||||
| switch (rank) | switch (rank) | ||||
| { | { | ||||
| case -1: | case -1: | ||||
| return $"tf.Tensor '{name}' shape=<unknown> dtype={dtype}"; | |||||
| return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; | |||||
| case 0: | case 0: | ||||
| return $"tf.Tensor '{name}' shape=() dtype={dtype}"; | |||||
| return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; | |||||
| default: | default: | ||||
| return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; | |||||
| return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; | |||||
| } | } | ||||
| } | } | ||||
| @@ -20,6 +20,7 @@ using System.Collections.Generic; | |||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using System.Linq; | using System.Linq; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -49,9 +50,9 @@ namespace Tensorflow | |||||
| bool verify_shape, | bool verify_shape, | ||||
| bool allow_broadcast) | bool allow_broadcast) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var t = convert_to_eager_tensor(value, tf.context, dtype: dtype); | |||||
| var t = convert_to_eager_tensor(value, tf.Context, dtype: dtype); | |||||
| if (shape == null) | if (shape == null) | ||||
| return t; | return t; | ||||
| @@ -69,7 +70,7 @@ namespace Tensorflow | |||||
| if (t.dtype == dtypes.@bool) | if (t.dtype == dtypes.@bool) | ||||
| throw new NotImplementedException(""); | throw new NotImplementedException(""); | ||||
| else | else | ||||
| return _eager_fill(shape, t, tf.context); | |||||
| return _eager_fill(shape, t, tf.Context); | |||||
| } | } | ||||
| } | } | ||||
| @@ -105,7 +106,7 @@ namespace Tensorflow | |||||
| var dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32); | var dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32); | ||||
| var inputs_flat = new[] { dims_t, value }; | var inputs_flat = new[] { dims_t, value }; | ||||
| var attrs = new object[] { "T", attr_t, "index_type", TF_DataType.TF_INT32 }; | var attrs = new object[] { "T", attr_t, "index_type", TF_DataType.TF_INT32 }; | ||||
| var result = tf._execute.execute(ctx, "Fill", 1, inputs_flat, attrs); | |||||
| var result = tf.Runner.Execute(ctx, "Fill", 1, inputs_flat, attrs); | |||||
| return result[0]; | return result[0]; | ||||
| } | } | ||||
| @@ -135,7 +136,7 @@ namespace Tensorflow | |||||
| if(dtype == TF_DataType.TF_STRING && value is byte[] bytes) | if(dtype == TF_DataType.TF_STRING && value is byte[] bytes) | ||||
| { | { | ||||
| return new EagerTensor(bytes, ctx.device_name, TF_DataType.TF_STRING); | |||||
| return new EagerTensor(bytes, ctx.DeviceName, TF_DataType.TF_STRING); | |||||
| } | } | ||||
| switch (value) | switch (value) | ||||
| @@ -143,53 +144,53 @@ namespace Tensorflow | |||||
| case EagerTensor val: | case EagerTensor val: | ||||
| return val; | return val; | ||||
| case NDArray val: | case NDArray val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case string val: | case string val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case string[] val: | case string[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case bool val: | case bool val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case byte val: | case byte val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case byte[] val: | case byte[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case byte[,] val: | case byte[,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case byte[,,] val: | case byte[,,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case int val: | case int val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case int[] val: | case int[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case int[,] val: | case int[,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case int[,,] val: | case int[,,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case long val: | case long val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case long[] val: | case long[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case long[,] val: | case long[,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case long[,,] val: | case long[,,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case float val: | case float val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case float[] val: | case float[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case float[,] val: | case float[,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case float[,,] val: | case float[,,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case double val: | case double val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case double[] val: | case double[] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case double[,] val: | case double[,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| case double[,,] val: | case double[,,] val: | ||||
| return new EagerTensor(val, ctx.device_name); | |||||
| return new EagerTensor(val, ctx.DeviceName); | |||||
| default: | default: | ||||
| throw new NotImplementedException($"convert_to_eager_tensor {value.GetType()}"); | throw new NotImplementedException($"convert_to_eager_tensor {value.GetType()}"); | ||||
| } | } | ||||
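The widened switch above enumerates the CLR value types that can be wrapped directly into eager tensors on `ctx.DeviceName`. A small usage sketch (assumes NumSharp's `np` and the constructors listed in the switch):

```csharp
using NumSharp;
using static Tensorflow.Binding;

// Each of these values is routed through convert_to_eager_tensor and
// wrapped into an EagerTensor placed on tf.Context.DeviceName.
var s = tf.constant(3.14);                                    // double
var v = tf.constant(new[] { 1, 2, 3 });                       // int[]
var m = tf.constant(new float[,] { { 1f, 2f }, { 3f, 4f } }); // float[,]
var n = tf.constant(np.arange(6).reshape(2, 3));              // NDArray
```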
| @@ -212,7 +212,7 @@ namespace Tensorflow | |||||
| }); | }); | ||||
| } | } | ||||
| if (!tf.context.executing_eagerly()) | |||||
| if (!tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var train_op = ops.get_collection_ref<Operation>(tf.GraphKeys.TRAIN_OP); | var train_op = ops.get_collection_ref<Operation>(tf.GraphKeys.TRAIN_OP); | ||||
| if (train_op != null && train_op.Contains(apply_updates)) | if (train_op != null && train_op.Contains(apply_updates)) | ||||
| @@ -155,7 +155,7 @@ namespace Tensorflow | |||||
| private void _check_saver_def() | private void _check_saver_def() | ||||
| { | { | ||||
| if (!tf.context.executing_eagerly()) | |||||
| if (!tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| if (string.IsNullOrEmpty(_saver_def.SaveTensorName)) | if (string.IsNullOrEmpty(_saver_def.SaveTensorName)) | ||||
| throw new ValueError($"saver_def must specify the save_tensor_name: {_saver_def}"); | throw new ValueError($"saver_def must specify the save_tensor_name: {_saver_def}"); | ||||
| @@ -244,7 +244,7 @@ namespace Tensorflow | |||||
| Console.WriteLine($"Restoring parameters from {save_path}"); | Console.WriteLine($"Restoring parameters from {save_path}"); | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| #pragma warning disable CS0642 // Possible mistaken empty statement | #pragma warning disable CS0642 // Possible mistaken empty statement | ||||
| ; | ; | ||||
| #pragma warning restore CS0642 // Possible mistaken empty statement | #pragma warning restore CS0642 // Possible mistaken empty statement | ||||
| @@ -33,7 +33,7 @@ namespace Tensorflow.Train | |||||
| #pragma warning disable CS0219 // Variable is assigned but its value is never used | #pragma warning disable CS0219 // Variable is assigned but its value is never used | ||||
| IInitializer checkpoint_initializer = null; | IInitializer checkpoint_initializer = null; | ||||
| #pragma warning restore CS0219 // Variable is assigned but its value is never used | #pragma warning restore CS0219 // Variable is assigned but its value is never used | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| #pragma warning disable CS0642 // Possible mistaken empty statement | #pragma warning disable CS0642 // Possible mistaken empty statement | ||||
| ; | ; | ||||
| #pragma warning restore CS0642 // Possible mistaken empty statement | #pragma warning restore CS0642 // Possible mistaken empty statement | ||||
| @@ -27,7 +27,7 @@ namespace Tensorflow | |||||
| Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, | Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad, | ||||
| bool use_locking = false, bool use_nesterov = false, string name = null) | bool use_locking = false, bool use_nesterov = false, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ApplyAdam", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ApplyAdam", name, new | |||||
| { | { | ||||
| var, | var, | ||||
| m, | m, | ||||
| @@ -48,7 +48,7 @@ namespace Tensorflow | |||||
| public static Tensor apply_gradient_descent(RefVariable var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) | public static Tensor apply_gradient_descent(RefVariable var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ApplyGradientDescent", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ApplyGradientDescent", name, new | |||||
| { | { | ||||
| var, | var, | ||||
| alpha, | alpha, | ||||
| @@ -61,9 +61,9 @@ namespace Tensorflow | |||||
| public static Operation resource_apply_gradient_descent(Tensor var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) | public static Operation resource_apply_gradient_descent(Tensor var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| var result = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name, | |||||
| var result = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||||
| "ResourceApplyGradientDescent", name, | "ResourceApplyGradientDescent", name, | ||||
| null, | null, | ||||
| var, alpha, delta, | var, alpha, delta, | ||||
| @@ -71,7 +71,7 @@ namespace Tensorflow | |||||
| return null; | return null; | ||||
| } | } | ||||
| var _op = tf._op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name, new | |||||
| var _op = tf.OpDefLib._apply_op_helper("ResourceApplyGradientDescent", name, new | |||||
| { | { | ||||
| var, | var, | ||||
| alpha, | alpha, | ||||
| @@ -136,7 +136,7 @@ namespace Tensorflow | |||||
| public override string ToString() | public override string ToString() | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}, numpy={tensor_util.to_numpy_string(read_value())}"; | return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}, numpy={tensor_util.to_numpy_string(read_value())}"; | ||||
| else | else | ||||
| return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}"; | return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}"; | ||||
| @@ -419,7 +419,7 @@ namespace Tensorflow | |||||
| public ITensorOrOperation assign_add<T>(T value, bool use_locking = false, string name = null, bool read_value = true) | public ITensorOrOperation assign_add<T>(T value, bool use_locking = false, string name = null, bool read_value = true) | ||||
| { | { | ||||
| var variable = this; | var variable = this; | ||||
| var _op = tf._op_def_lib._apply_op_helper("AssignAdd", name: name, args: new { variable, value, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("AssignAdd", name: name, args: new { variable, value, use_locking }); | |||||
| return _op; | return _op; | ||||
| } | } | ||||
| } | } | ||||
| @@ -88,7 +88,7 @@ namespace Tensorflow | |||||
| collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES); | collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES); | ||||
| ops.init_scope(); | ops.init_scope(); | ||||
| _in_graph_mode = !tf.context.executing_eagerly(); | |||||
| _in_graph_mode = !tf.Context.executing_eagerly(); | |||||
| tf_with(ops.name_scope(name, "Variable"), scope => | tf_with(ops.name_scope(name, "Variable"), scope => | ||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -104,7 +104,7 @@ namespace Tensorflow | |||||
| else | else | ||||
| { | { | ||||
| unique_id = $"{handle_name}_{ops.uid()}"; | unique_id = $"{handle_name}_{ops.uid()}"; | ||||
| shared_name = tf.context.shared_name(); | |||||
| shared_name = tf.Context.shared_name(); | |||||
| } | } | ||||
| var attr = new AttrValue(); | var attr = new AttrValue(); | ||||
| @@ -34,7 +34,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") | public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); | |||||
| var _result = _op.outputs; | var _result = _op.outputs; | ||||
| var _inputs_flat = _op.inputs; | var _inputs_flat = _op.inputs; | ||||
| @@ -61,7 +61,7 @@ namespace Tensorflow | |||||
| bool use_locking = true, | bool use_locking = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _result = _op.outputs; | var _result = _op.outputs; | ||||
| var _inputs_flat = _op.inputs; | var _inputs_flat = _op.inputs; | ||||
| @@ -79,7 +79,7 @@ namespace Tensorflow | |||||
| bool use_locking = true, | bool use_locking = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _result = _op.outputs; | var _result = _op.outputs; | ||||
| var _inputs_flat = _op.inputs; | var _inputs_flat = _op.inputs; | ||||
| @@ -97,7 +97,7 @@ namespace Tensorflow | |||||
| bool use_locking = true, | bool use_locking = true, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||||
| var _result = _op.outputs; | var _result = _op.outputs; | ||||
| var _inputs_flat = _op.inputs; | var _inputs_flat = _op.inputs; | ||||
| @@ -115,7 +115,7 @@ namespace Tensorflow | |||||
| bool use_locking = false, | bool use_locking = false, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| @@ -131,13 +131,13 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor scatter_add(IVariableV1 @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) | public static Tensor scatter_add(IVariableV1 @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking }); | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor is_variable_initialized(RefVariable @ref, string name = null) | public static Tensor is_variable_initialized(RefVariable @ref, string name = null) | ||||
| { | { | ||||
| var _op = tf._op_def_lib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref }); | |||||
| var _op = tf.OpDefLib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref }); | |||||
| return _op.output; | return _op.output; | ||||
| } | } | ||||
| } | } | ||||
| @@ -24,6 +24,7 @@ using NumSharp; | |||||
| using Tensorflow.Util; | using Tensorflow.Util; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using Tensorflow.Contexts; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -176,7 +177,7 @@ namespace Tensorflow | |||||
| throw new NotImplementedException("_create_c_op"); | throw new NotImplementedException("_create_c_op"); | ||||
| } | } | ||||
| var status = tf.status; | |||||
| var status = tf.Status; | |||||
| // Add control inputs | // Add control inputs | ||||
| foreach (var control_input in control_inputs) | foreach (var control_input in control_inputs) | ||||
| @@ -240,7 +241,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static void init_scope() | public static void init_scope() | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| return; | return; | ||||
| // Retrieve the active name scope: entering an `init_scope` preserves | // Retrieve the active name scope: entering an `init_scope` preserves | ||||
| @@ -16,6 +16,7 @@ | |||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Diagnostics; | using System.Diagnostics; | ||||
| using Tensorflow.Contexts; | |||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using static Tensorflow.Binding; | using static Tensorflow.Binding; | ||||
| @@ -48,9 +49,9 @@ namespace Tensorflow | |||||
| public void __enter__() | public void __enter__() | ||||
| { | { | ||||
| _name = _name ?? _default_name; | _name = _name ?? _default_name; | ||||
| if (tf.context.executing_eagerly()) | |||||
| if (tf.Context.executing_eagerly()) | |||||
| { | { | ||||
| (scope_name, old_scope_name) = enter_eager_name_scope(tf.context, _name); | |||||
| (scope_name, old_scope_name) = enter_eager_name_scope(tf.Context, _name); | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| @@ -75,7 +76,7 @@ namespace Tensorflow | |||||
| name = ""; | name = ""; | ||||
| var scope_name = name; | var scope_name = name; | ||||
| var old_name = ctx.scope_name; | |||||
| var old_name = ctx.ScopeName; | |||||
| // A trailing slash breaks out of nested name scopes, indicating a | // A trailing slash breaks out of nested name scopes, indicating a | ||||
| // fully specified scope name, for compatibility with Graph.name_scope. | // fully specified scope name, for compatibility with Graph.name_scope. | ||||
| if (!name.EndsWith("/")) | if (!name.EndsWith("/")) | ||||
| @@ -85,14 +86,14 @@ namespace Tensorflow | |||||
| scope_name = old_name + scope_name; | scope_name = old_name + scope_name; | ||||
| } | } | ||||
| ctx.scope_name = scope_name; | |||||
| ctx.ScopeName = scope_name; | |||||
| return (scope_name, old_name); | return (scope_name, old_name); | ||||
| } | } | ||||
| public void Dispose() | public void Dispose() | ||||
| { | { | ||||
| if (tf.context.executing_eagerly()) | |||||
| tf.context.scope_name = old_scope_name; | |||||
| if (tf.Context.executing_eagerly()) | |||||
| tf.Context.ScopeName = old_scope_name; | |||||
| else | else | ||||
| get_default_graph()._name_stack = old_scope_name; | get_default_graph()._name_stack = old_scope_name; | ||||
| } | } | ||||
| @@ -20,6 +20,7 @@ using System.Collections.Generic; | |||||
| using System.Linq; | using System.Linq; | ||||
| using System.Runtime.InteropServices; | using System.Runtime.InteropServices; | ||||
| using System.Threading; | using System.Threading; | ||||
| using Tensorflow.Contexts; | |||||
| using Tensorflow.Eager; | using Tensorflow.Eager; | ||||
| using Tensorflow.Gradients; | using Tensorflow.Gradients; | ||||
| @@ -41,19 +42,24 @@ namespace Tensorflow | |||||
| public delegate Tensor[] BackwardFunction(Tensor[] grads, long[] unneeded_gradients); | public delegate Tensor[] BackwardFunction(Tensor[] grads, long[] unneeded_gradients); | ||||
| public Status status = new Status(); | |||||
| public OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||||
| public Context context = new Context(new ContextOptions(), new Status()); | |||||
| public Execute _execute = new Execute(); | |||||
| public IEagerRunner Runner = new EagerRunner(); | |||||
| public Status Status; | |||||
| public OpDefLibrary OpDefLib; | |||||
| public Context Context; | |||||
| public IEagerRunner Runner; | |||||
| public tensorflow() | public tensorflow() | ||||
| { | { | ||||
| Status = new Status(); | |||||
| Context = new Context(new ContextOptions(), Status); | |||||
| enable_eager_execution(); | enable_eager_execution(); | ||||
| _constructThreadingObjects(); | |||||
| OpDefLib = new OpDefLibrary(); | |||||
| ConstructThreadingObjects(); | |||||
| InitGradientEnvironment(); | InitGradientEnvironment(); | ||||
| Runner = new EagerRunner(); | |||||
| } | } | ||||
| public string VERSION => c_api.StringPiece(c_api.TF_Version()); | |||||
| private void InitGradientEnvironment() | private void InitGradientEnvironment() | ||||
| { | { | ||||
| ops.RegisterFromAssembly(); | ops.RegisterFromAssembly(); | ||||
| @@ -74,15 +80,10 @@ namespace Tensorflow | |||||
| shape: shape); | shape: shape); | ||||
| public Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) | public Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) | ||||
| => gen_array_ops.placeholder(dtype, shape, name); | |||||
| => array_ops.placeholder(dtype, shape, name); | |||||
| public void enable_eager_execution() | public void enable_eager_execution() | ||||
| { | |||||
| // contex = new Context(); | |||||
| context.default_execution_mode = Context.EAGER_MODE; | |||||
| } | |||||
| public string VERSION => c_api.StringPiece(c_api.TF_Version()); | |||||
| => Context.eager_mode(); | |||||
| public Session get_default_session() | public Session get_default_session() | ||||
| => ops.get_default_session(); | => ops.get_default_session(); | ||||
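With the constructor now wiring `Status` → `Context` → eager mode → `OpDefLib` → threading → gradients → `Runner` in that order, client code reaches everything through the shared `tf` instance. A hedged usage sketch follows, assuming the `using static Tensorflow.Binding;` entry point that the test files in this change already use; the commented-out placeholder call is shown for API shape only, since placeholders are graph-mode constructs and eager execution is on by default after this change.

```csharp
// Hedged usage sketch of the refactored bootstrap; it only exercises members
// that appear in this change set.
using System;
using static Tensorflow.Binding;

class BootstrapDemo
{
    static void Main()
    {
        // The tensorflow() constructor already called enable_eager_execution(),
        // which now delegates to Context.eager_mode().
        Console.WriteLine($"native TF version: v{tf.VERSION}");
        Console.WriteLine($"eager: {tf.Context.executing_eagerly()}");

        // tf.placeholder now routes through array_ops.placeholder rather than
        // gen_array_ops.placeholder; it is graph-mode only, so it is left
        // commented out here.
        // var x = tf.placeholder(tf.float32);
    }
}
```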
| @@ -21,12 +21,12 @@ namespace Tensorflow | |||||
| { | { | ||||
| public partial class tensorflow : ITensorFlowObject | public partial class tensorflow : ITensorFlowObject | ||||
| { | { | ||||
| protected ThreadLocal<Session> _defaultSessionFactory; | |||||
| protected ThreadLocal<Session> defaultSessionFactory; | |||||
| [MethodImpl(MethodImplOptions.AggressiveInlining)] | [MethodImpl(MethodImplOptions.AggressiveInlining)] | ||||
| public void _constructThreadingObjects() | |||||
| public void ConstructThreadingObjects() | |||||
| { | { | ||||
| _defaultSessionFactory = new ThreadLocal<Session>(() => new Session()); | |||||
| defaultSessionFactory = new ThreadLocal<Session>(() => new Session()); | |||||
| } | } | ||||
| public Session defaultSession | public Session defaultSession | ||||
| @@ -34,7 +34,7 @@ namespace Tensorflow | |||||
| get | get | ||||
| { | { | ||||
| if (!ops.IsSingleThreaded) | if (!ops.IsSingleThreaded) | ||||
| return _defaultSessionFactory.Value; | |||||
| return defaultSessionFactory.Value; | |||||
| return ops.get_default_session(); | return ops.get_default_session(); | ||||
| } | } | ||||
| @@ -42,7 +42,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| if (!ops.IsSingleThreaded) | if (!ops.IsSingleThreaded) | ||||
| { | { | ||||
| _defaultSessionFactory.Value = value; | |||||
| defaultSessionFactory.Value = value; | |||||
| return; | return; | ||||
| } | } | ||||
| @@ -8,6 +8,7 @@ | |||||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | ||||
| <AllowUnsafeBlocks>true</AllowUnsafeBlocks> | <AllowUnsafeBlocks>true</AllowUnsafeBlocks> | ||||
| <DefineConstants>DEBUG;TRACE</DefineConstants> | |||||
| </PropertyGroup> | </PropertyGroup> | ||||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> | <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> | ||||
| @@ -52,7 +52,7 @@ Set ENV `BAZEL_VC=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\ | |||||
| 1. Build static library | 1. Build static library | ||||
| `bazel build --config=opt //tensorflow:libtensorflow.so` | |||||
| `bazel build --config=opt //tensorflow:tensorflow` | |||||
| 2. Build pip package | 2. Build pip package | ||||
| @@ -6,13 +6,14 @@ using Tensorflow.Keras.Engine; | |||||
| using Tensorflow.Keras.Layers; | using Tensorflow.Keras.Layers; | ||||
| using NumSharp; | using NumSharp; | ||||
| using Tensorflow.UnitTest; | using Tensorflow.UnitTest; | ||||
| using static Tensorflow.Binding; | |||||
| namespace TensorFlowNET.UnitTest.Keras | namespace TensorFlowNET.UnitTest.Keras | ||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/keras/layers/Embedding | /// https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/keras/layers/Embedding | ||||
| /// </summary> | /// </summary> | ||||
| [TestClass] | |||||
| [TestClass, Ignore] | |||||
| public class EmbeddingTest : GraphModeTestBase | public class EmbeddingTest : GraphModeTestBase | ||||
| { | { | ||||
| [TestMethod] | [TestMethod] | ||||
| @@ -29,5 +30,13 @@ namespace TensorFlowNET.UnitTest.Keras | |||||
| var input_array = np.random.randint(1000, size: (32, 10)); | var input_array = np.random.randint(1000, size: (32, 10)); | ||||
| model.compile("rmsprop", "mse"); | model.compile("rmsprop", "mse"); | ||||
| } | } | ||||
| [TestMethod] | |||||
| public void Dense() | |||||
| { | |||||
| var model = tf.keras.Sequential(); | |||||
| var dense_layer = tf.keras.layers.Dense(5, input_shape: 3); | |||||
| model.add(dense_layer); | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| @@ -47,7 +47,7 @@ | |||||
| <PackageReference Include="MSTest.TestAdapter" Version="2.1.2" /> | <PackageReference Include="MSTest.TestAdapter" Version="2.1.2" /> | ||||
| <PackageReference Include="MSTest.TestFramework" Version="2.1.2" /> | <PackageReference Include="MSTest.TestFramework" Version="2.1.2" /> | ||||
| <PackageReference Include="NumSharp.Lite" Version="0.1.7" /> | <PackageReference Include="NumSharp.Lite" Version="0.1.7" /> | ||||
| <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.2.0.2" /> | |||||
| <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" /> | |||||
| </ItemGroup> | </ItemGroup> | ||||
| <ItemGroup> | <ItemGroup> | ||||