diff --git a/README.md b/README.md
index 9087c023..f437ad8e 100644
--- a/README.md
+++ b/README.md
@@ -26,11 +26,11 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr
### How to use
-| TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.2 |
-| ----------- | ------- | ------- | ------- | ------ |
-| tf.net 0.20 | | | x | x |
-| tf.net 0.15 | | x | x | |
-| tf.net 0.14 | x | x | | |
+| TensorFlow | tf native 1.14 | tf native 1.15 | tf native 2.3 |
+| ----------- | ------------- | -------------- | ------------- |
+| tf.net 0.20 | | x | x |
+| tf.net 0.15 | x | x | |
+| tf.net 0.14 | x | | |
Install TF.NET and the TensorFlow binary through NuGet.
```sh
@@ -138,6 +138,10 @@ Scan QR code to join Tencent TIM group:

+WeChat Sponsor 微信打赏:
+
+![WeChat Sponsor](docs/assets/WeChatCollection.jpg)
+
TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
-
+
\ No newline at end of file
diff --git a/docs/assets/WeChatCollection.jpg b/docs/assets/WeChatCollection.jpg
new file mode 100644
index 00000000..587b5499
Binary files /dev/null and b/docs/assets/WeChatCollection.jpg differ
diff --git a/src/TensorFlowNET.Console/MemoryMonitor.cs b/src/TensorFlowNET.Console/MemoryMonitor.cs
index 86130583..a7e5be37 100644
--- a/src/TensorFlowNET.Console/MemoryMonitor.cs
+++ b/src/TensorFlowNET.Console/MemoryMonitor.cs
@@ -10,7 +10,7 @@ namespace Tensorflow
{
public void WarmUp()
{
- print(tf.VERSION);
+ print($"tensorflow native version: v{tf.VERSION}");
}
public void Execute(int epoch, int iterate, Action process)
diff --git a/src/TensorFlowNET.Console/TensorFlowNET.Console.csproj b/src/TensorFlowNET.Console/TensorFlowNET.Console.csproj
index a047afb9..8c31d20e 100644
--- a/src/TensorFlowNET.Console/TensorFlowNET.Console.csproj
+++ b/src/TensorFlowNET.Console/TensorFlowNET.Console.csproj
@@ -8,7 +8,7 @@
-
+
diff --git a/src/TensorFlowNET.Core/APIs/keras.layers.cs b/src/TensorFlowNET.Core/APIs/keras.layers.cs
deleted file mode 100644
index 92900e76..00000000
--- a/src/TensorFlowNET.Core/APIs/keras.layers.cs
+++ /dev/null
@@ -1,64 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System.Linq;
-using Tensorflow.Keras.Layers;
-
-namespace Tensorflow
-{
- public static partial class keras
- {
- public static class layers
- {
- public static Embedding Embedding(int input_dim, int output_dim,
- IInitializer embeddings_initializer = null,
- bool mask_zero = false) => new Embedding(input_dim, output_dim,
- embeddings_initializer,
- mask_zero);
-
- public static Tensor[] Input(int[] batch_shape = null,
- TF_DataType dtype = TF_DataType.DtInvalid,
- string name = null,
- bool sparse = false,
- Tensor tensor = null)
- {
- var batch_size = batch_shape[0];
- var shape = batch_shape.Skip(1).ToArray();
-
- InputLayer input_layer = null;
- if (batch_shape != null)
- input_layer = new InputLayer(
- batch_input_shape: batch_shape,
- name: name,
- dtype: dtype,
- sparse: sparse,
- input_tensor: tensor);
- else
- input_layer = new InputLayer(
- input_shape: shape,
- batch_size: batch_size,
- name: name,
- dtype: dtype,
- sparse: sparse,
- input_tensor: tensor);
-
- var outputs = input_layer.inbound_nodes[0].output_tensors;
-
- return outputs;
- }
- }
- }
-}
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.cs b/src/TensorFlowNET.Core/APIs/tf.compat.cs
index 870ef82f..d29de189 100644
--- a/src/TensorFlowNET.Core/APIs/tf.compat.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.cs
@@ -26,5 +26,8 @@ namespace Tensorflow
{
public CompatV1Api v1 { get; } = new CompatV1Api();
}
+
+ public bool executing_eagerly()
+ => Context.executing_eagerly();
}
}
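
For reference, a minimal call-site sketch of the new shortcut (assuming the enclosing partial class is the one exposed as the global `tf` instance, which the `Context.executing_eagerly()` body suggests):

```csharp
using static Tensorflow.Binding;

class EagerCheck
{
    static void Main()
    {
        // The new top-level helper simply defers to tf.Context.executing_eagerly().
        print($"eager execution enabled: {tf.executing_eagerly()}");
    }
}
```
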
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
index 63833991..a08597a8 100644
--- a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
@@ -16,7 +16,7 @@
using System;
using System.Collections.Generic;
-using Tensorflow.Eager;
+using Tensorflow.Contexts;
using static Tensorflow.Binding;
namespace Tensorflow
@@ -24,9 +24,7 @@ namespace Tensorflow
public class CompatV1Api
{
public void disable_eager_execution()
- {
- tf.context.default_execution_mode = Context.GRAPH_MODE;
- }
+ => tf.Context.graph_mode();
public IVariableV1 get_variable(string name,
TensorShape shape = null,
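
A hedged usage sketch of the reworked v1 switch; graph-mode scripts would typically call it once at startup:

```csharp
using static Tensorflow.Binding;

class GraphModeExample
{
    static void Main()
    {
        // Flips the global context into graph mode; the old code assigned
        // default_execution_mode directly, this now routes through Context.graph_mode().
        tf.compat.v1.disable_eager_execution();

        print(tf.executing_eagerly()); // expected: false
    }
}
```
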
diff --git a/src/TensorFlowNET.Core/APIs/tf.layers.cs b/src/TensorFlowNET.Core/APIs/tf.layers.cs
index 3ebddbcf..085df3c5 100644
--- a/src/TensorFlowNET.Core/APIs/tf.layers.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.layers.cs
@@ -18,6 +18,7 @@ using System;
using System.Collections.Generic;
using System.Linq;
using NumSharp;
+using Tensorflow.Keras;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;
using Tensorflow.Operations.Activation;
@@ -164,7 +165,7 @@ namespace Tensorflow
///
public Tensor dense(Tensor inputs,
int units,
- IActivation activation = null,
+ Activation activation = null,
bool use_bias = true,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null,
diff --git a/src/TensorFlowNET.Core/Contexts/Context.cs b/src/TensorFlowNET.Core/Contexts/Context.cs
new file mode 100644
index 00000000..68e816df
--- /dev/null
+++ b/src/TensorFlowNET.Core/Contexts/Context.cs
@@ -0,0 +1,90 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using Tensorflow.Eager;
+
+namespace Tensorflow.Contexts
+{
+ /// <summary>
+ /// Environment in which eager operations execute.
+ /// </summary>
+ public sealed class Context : IDisposable
+ {
+ public const int GRAPH_MODE = 0;
+ public const int EAGER_MODE = 1;
+
+ int defaultExecutionMode = EAGER_MODE;
+ public string DeviceName { get; set; } = "";
+ public string ScopeName { get; set; } = "";
+ bool initialized = false;
+ bool isEager;
+ ContextSwitchStack contextSwitches;
+
+ public SafeContextHandle Handle { get; }
+
+ public Context(ContextOptions opts, Status status)
+ {
+ Handle = c_api.TFE_NewContext(opts.Handle, status.Handle);
+ status.Check(true);
+ isEager = defaultExecutionMode == EAGER_MODE;
+ contextSwitches = new ContextSwitchStack(isEager);
+ initialized = true;
+ }
+
+ /// <summary>
+ /// Initialize handle and devices if not already done so.
+ /// </summary>
+ public void ensure_initialized()
+ {
+ if (initialized)
+ return;
+ initialized = true;
+ }
+
+ public void start_step()
+ => c_api.TFE_ContextStartStep(Handle);
+
+ public void end_step()
+ => c_api.TFE_ContextEndStep(Handle);
+
+ /// <summary>
+ /// Checks whether the current thread has eager execution enabled.
+ /// </summary>
+ /// <returns></returns>
+ public bool executing_eagerly()
+ => isEager;
+
+ public string shared_name(string name = null)
+ => !string.IsNullOrEmpty(name) || !executing_eagerly() ?
+ name :
+ "cd2c89b7-88b7-44c8-ad83-06c2a9158347";
+
+ public void graph_mode()
+ => mode(false);
+
+ public void eager_mode()
+ => mode(true);
+
+ void mode(bool mode)
+ {
+ isEager = mode;
+ }
+
+ public void Dispose()
+ => Handle.Dispose();
+ }
+}
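
Mode switching on the new `Context` is just a boolean flip on the shared instance; a minimal sketch, assuming `tf.Context` is the process-wide instance used throughout this diff:

```csharp
using static Tensorflow.Binding;

class ContextModes
{
    static void Main()
    {
        // Default mode is eager (EAGER_MODE).
        print(tf.Context.executing_eagerly()); // true

        // Switch to graph mode and back; only the isEager flag changes.
        tf.Context.graph_mode();
        print(tf.Context.executing_eagerly()); // false

        tf.Context.eager_mode();
        print(tf.Context.executing_eagerly()); // true
    }
}
```
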
diff --git a/src/TensorFlowNET.Core/Eager/ContextOptions.cs b/src/TensorFlowNET.Core/Contexts/ContextOptions.cs
similarity index 95%
rename from src/TensorFlowNET.Core/Eager/ContextOptions.cs
rename to src/TensorFlowNET.Core/Contexts/ContextOptions.cs
index 399c7a0e..6c2156a9 100644
--- a/src/TensorFlowNET.Core/Eager/ContextOptions.cs
+++ b/src/TensorFlowNET.Core/Contexts/ContextOptions.cs
@@ -15,8 +15,9 @@
******************************************************************************/
using System;
+using Tensorflow.Eager;
-namespace Tensorflow.Eager
+namespace Tensorflow.Contexts
{
public sealed class ContextOptions : IDisposable
{
diff --git a/src/TensorFlowNET.Core/APIs/keras.preprocessing.cs b/src/TensorFlowNET.Core/Contexts/ContextSwitch.cs
similarity index 62%
rename from src/TensorFlowNET.Core/APIs/keras.preprocessing.cs
rename to src/TensorFlowNET.Core/Contexts/ContextSwitch.cs
index 125b26f7..00c9a42a 100644
--- a/src/TensorFlowNET.Core/APIs/keras.preprocessing.cs
+++ b/src/TensorFlowNET.Core/Contexts/ContextSwitch.cs
@@ -14,15 +14,24 @@
limitations under the License.
******************************************************************************/
-using Tensorflow.Keras;
-using Tensorflow.Keras.Engine;
+using System;
+using System.Collections.Generic;
+using System.Text;
-namespace Tensorflow
+namespace Tensorflow.Contexts
{
- public static partial class keras
+ public class ContextSwitch
{
- public static Preprocessing preprocessing => new Preprocessing();
- public static Sequence sequence = new Sequence();
- public static Sequential Sequential() => new Sequential();
+ /// <summary>
+ /// Whether the context is building a function.
+ /// </summary>
+ public bool IsBuildingFunction { get; set; }
+
+ /// <summary>
+ /// A callable that executes the context switch.
+ /// </summary>
+ public Action EnterContextFn { get; set; }
+
+ public string DeviceStack { get; set; }
}
}
diff --git a/src/TensorFlowNET.Core/Contexts/ContextSwitchStack.cs b/src/TensorFlowNET.Core/Contexts/ContextSwitchStack.cs
new file mode 100644
index 00000000..69cefe9b
--- /dev/null
+++ b/src/TensorFlowNET.Core/Contexts/ContextSwitchStack.cs
@@ -0,0 +1,40 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Contexts
+{
+ /// <summary>
+ /// Match the semantics of DefaultGraphStack.
+ /// </summary>
+ public class ContextSwitchStack
+ {
+ Stack<ContextSwitch> stack;
+
+ public ContextSwitchStack(bool isEager)
+ {
+ stack = new Stack<ContextSwitch>();
+ if (isEager)
+ stack.Push(new ContextSwitch
+ {
+ IsBuildingFunction = false
+ });
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Data/ShuffleDataset.cs b/src/TensorFlowNET.Core/Data/ShuffleDataset.cs
index bc23715b..1804ba53 100644
--- a/src/TensorFlowNET.Core/Data/ShuffleDataset.cs
+++ b/src/TensorFlowNET.Core/Data/ShuffleDataset.cs
@@ -25,7 +25,7 @@ namespace Tensorflow
(_seed, _seed2) = random_seed.get_seed_tensor(seed);
_reshuffle_each_iteration = reshuffle_each_iteration;
var seed_generator = ops.dummy_seed_generator();
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
variant_tensor = ops.shuffle_dataset_v3(input_dataset.variant_tensor, _buffer_size,
_seed, _seed2, seed_generator,
output_types, output_shapes,
diff --git a/src/TensorFlowNET.Core/Eager/Context.cs b/src/TensorFlowNET.Core/Eager/Context.cs
deleted file mode 100644
index 95c2a832..00000000
--- a/src/TensorFlowNET.Core/Eager/Context.cs
+++ /dev/null
@@ -1,50 +0,0 @@
-using System;
-
-namespace Tensorflow.Eager
-{
- public sealed class Context : IDisposable
- {
- public const int GRAPH_MODE = 0;
- public const int EAGER_MODE = 1;
-
- public int default_execution_mode;
- public string device_name = "";
- public string scope_name = "";
- bool _initialized = false;
-
- public SafeContextHandle Handle { get; }
-
- public Context(ContextOptions opts, Status status)
- {
- Handle = c_api.TFE_NewContext(opts.Handle, status.Handle);
- status.Check(true);
- }
-
- ///
- /// Initialize handle and devices if not already done so.
- ///
- public void ensure_initialized()
- {
- if (_initialized)
- return;
- _initialized = true;
- }
-
- public void start_step()
- => c_api.TFE_ContextStartStep(Handle);
-
- public void end_step()
- => c_api.TFE_ContextEndStep(Handle);
-
- public bool executing_eagerly()
- => default_execution_mode == EAGER_MODE;
-
- public string shared_name(string name = null)
- => !string.IsNullOrEmpty(name) || !executing_eagerly() ?
- name :
- "cd2c89b7-88b7-44c8-ad83-06c2a9158347";
-
- public void Dispose()
- => Handle.Dispose();
- }
-}
diff --git a/src/TensorFlowNET.Core/Eager/EagerOperation.cs b/src/TensorFlowNET.Core/Eager/EagerOperation.cs
index 982198f8..2aa7c04c 100644
--- a/src/TensorFlowNET.Core/Eager/EagerOperation.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerOperation.cs
@@ -53,7 +53,7 @@ namespace Tensorflow.Eager
{
object value = null;
byte isList = 0;
- var attrType = c_api.TFE_OpNameGetAttrType(tf.context.Handle, Name, attr_name, ref isList, tf.status.Handle);
+ var attrType = c_api.TFE_OpNameGetAttrType(tf.Context.Handle, Name, attr_name, ref isList, tf.Status.Handle);
switch (attrType)
{
case TF_AttrType.TF_ATTR_BOOL:
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.ArgsToMatchingEager.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.ArgsToMatchingEager.cs
new file mode 100644
index 00000000..7c3f1307
--- /dev/null
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.ArgsToMatchingEager.cs
@@ -0,0 +1,58 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using System;
+using System.Linq;
+using static Tensorflow.Binding;
+using Tensorflow.Contexts;
+
+namespace Tensorflow.Eager
+{
+ public partial class EagerRunner
+ {
+ public (TF_DataType, Tensor[]) ArgsToMatchingEager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null)
+ {
+ if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid)
+ return (default_dtype, null);
+
+ if (args.Count(x => x is Tensor) == args.Length)
+ return ((args[0] as Tensor).dtype, args.Select(x => x as Tensor).ToArray());
+
+ var dtype = TF_DataType.DtInvalid;
+ foreach (var x in args)
+ {
+ if (x is Tensor et)
+ dtype = et.dtype;
+ }
+
+ if (dtype == TF_DataType.DtInvalid)
+ {
+ var ret = new List<Tensor>();
+ foreach (var t in args)
+ {
+ ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as Tensor);
+ if (dtype == TF_DataType.DtInvalid)
+ dtype = ret.Last().dtype;
+ }
+
+ return (dtype, ret.ToArray());
+ }
+ else
+ throw new NotImplementedException("");
+ }
+ }
+}
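
To illustrate what `ArgsToMatchingEager` does, a hedged sketch of a call through `tf.Runner` (the method is added to `IEagerRunner` later in this diff); the literal arguments are arbitrary:

```csharp
using static Tensorflow.Binding;

class MatchingEagerExample
{
    static void Main()
    {
        // Plain values are converted to eager tensors and a common dtype
        // is inferred from the first converted argument.
        var (dtype, tensors) = tf.Runner.ArgsToMatchingEager(
            tf.Context,
            args: new object[] { 1.0f, 2.0f, 3.0f });

        print(dtype);          // inferred float dtype
        print(tensors.Length); // 3
    }
}
```
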
diff --git a/src/TensorFlowNET.Core/Eager/Execute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.Execute.cs
similarity index 50%
rename from src/TensorFlowNET.Core/Eager/Execute.cs
rename to src/TensorFlowNET.Core/Eager/EagerRunner.Execute.cs
index 04c11a1d..2bb39bad 100644
--- a/src/TensorFlowNET.Core/Eager/Execute.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.Execute.cs
@@ -1,11 +1,30 @@
-using System.Collections.Generic;
-using System;
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
using System.Linq;
+using System;
using static Tensorflow.Binding;
+using Tensorflow.Contexts;
namespace Tensorflow.Eager
{
- public class Execute
+ /// <summary>
+ /// python\eager\pywrap_tfe_src.cc
+ /// </summary>
+ public partial class EagerRunner
{
///
/// Execute a TensorFlow operation.
@@ -28,14 +47,14 @@ namespace Tensorflow.Eager
/// The value of context.context().
/// Customized name for the operation.
/// List of output Tensor objects. The list is empty if there are no outputs
- public Tensor[] execute(Context ctx, string op_name, int num_outputs,
- Tensor[] inputs, object[] attrs,
+ public Tensor[] Execute(Context ctx, string op_name, int num_outputs,
+ Tensor[] inputs, object[] attrs,
string name = null)
{
ctx.ensure_initialized();
var results = tf.Runner.TFE_Execute(ctx,
- ctx.device_name,
+ ctx.DeviceName,
op_name,
inputs,
attrs,
@@ -43,36 +62,5 @@ namespace Tensorflow.Eager
return results;
}
-
- public (TF_DataType, Tensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null)
- {
- if (args.Length == 0 && default_dtype != TF_DataType.DtInvalid)
- return (default_dtype, null);
-
- if (args.Count(x => x is Tensor) == args.Length)
- return ((args[0] as Tensor).dtype, args.Select(x => x as Tensor).ToArray());
-
- var dtype = TF_DataType.DtInvalid;
- foreach (var x in args)
- {
- if (x is Tensor et)
- dtype = et.dtype;
- }
-
- if (dtype == TF_DataType.DtInvalid)
- {
- var ret = new List();
- foreach (var t in args)
- {
- ret.Add(ops.convert_to_tensor(t, dtype, preferred_dtype: default_dtype, ctx: ctx) as Tensor);
- if (dtype == TF_DataType.DtInvalid)
- dtype = ret.Last().dtype;
- }
-
- return (dtype, ret.ToArray());
- }
- else
- throw new NotImplementedException("");
- }
}
-}
+}
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_Execute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_Execute.cs
index c2764cae..a4f2f345 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_Execute.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_Execute.cs
@@ -1,7 +1,23 @@
-using System.Collections.Generic;
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
using System.Linq;
using System;
using static Tensorflow.Binding;
+using Tensorflow.Contexts;
namespace Tensorflow.Eager
{
@@ -25,7 +41,7 @@ namespace Tensorflow.Eager
object[] attrs,
int num_outputs)
{
- var status = tf.status;
+ var status = tf.Status;
var op = GetOp(ctx, op_name, status);
status.Check(true);
c_api.TFE_OpSetDevice(op, device_name, status.Handle);
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
index 13189b1d..52b811de 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
@@ -3,11 +3,9 @@ using System.Linq;
using System;
using static Tensorflow.OpDef.Types;
using static Tensorflow.Binding;
-using Google.Protobuf.WellKnownTypes;
-using System.Threading;
using Tensorflow.Util;
-using System.Runtime.InteropServices.ComTypes;
using System.Runtime.InteropServices;
+using Tensorflow.Contexts;
namespace Tensorflow.Eager
{
@@ -17,6 +15,7 @@ namespace Tensorflow.Eager
public partial class EagerRunner
{
int kFastPathExecuteInputStartIndex = 0;
+ UnorderedMap<Context, SafeOpHandle> thread_local_eager_operation_map = new UnorderedMap<Context, SafeOpHandle>();
public Tensor[] TFE_FastPathExecute(Context ctx,
string device_name,
@@ -45,7 +44,7 @@ namespace Tensorflow.Eager
op_exec_info.run_post_exec_callbacks = callbacks != null;
op_exec_info.run_callbacks = op_exec_info.run_gradient_callback || op_exec_info.run_post_exec_callbacks;
- var status = tf.status;
+ var status = tf.Status;
var op = GetOp(ctx, opName, status);
var op_def = tf.get_default_graph().GetOpDef(opName);
@@ -173,7 +172,7 @@ namespace Tensorflow.Eager
SafeOpHandle GetOp(Context ctx, string op_or_function_name, Status status)
{
if (thread_local_eager_operation_map.find(ctx, out var op))
- c_api.TFE_OpReset(op, op_or_function_name, ctx.device_name, status.Handle);
+ c_api.TFE_OpReset(op, op_or_function_name, ctx.DeviceName, status.Handle);
else
{
op = c_api.TFE_NewOp(ctx.Handle, op_or_function_name, status.Handle);
@@ -184,8 +183,6 @@ namespace Tensorflow.Eager
return op;
}
- static UnorderedMap thread_local_eager_operation_map = new UnorderedMap();
-
bool HasAccumulator()
{
//return !GetAccumulatorSet()->empty();
@@ -252,7 +249,7 @@ namespace Tensorflow.Eager
public void SetOpAttrs(SafeOpHandle op, params object[] attrs)
{
- var status = tf.status;
+ var status = tf.Status;
var len = attrs.Length;
for (int i = 0; i < len; i += 2)
{
@@ -263,9 +260,9 @@ namespace Tensorflow.Eager
var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status.Handle);
if (!status.ok()) return;
if (is_list != 0)
- SetOpAttrList(tf.context, op, key, value as object[], type, null, status);
+ SetOpAttrList(tf.Context, op, key, value as object[], type, null, status);
else
- SetOpAttrScalar(tf.context, op, key, value, type, null, status);
+ SetOpAttrScalar(tf.Context, op, key, value, type, null, status);
status.Check(true);
}
}
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.cs
index 28e6e2f5..637af1f4 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.cs
@@ -11,6 +11,6 @@ namespace Tensorflow.Eager
///
public partial class EagerRunner : IEagerRunner
{
-
+
}
}
diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
index 543f671a..68ef56b8 100644
--- a/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
@@ -22,25 +22,25 @@ namespace Tensorflow.Eager
public EagerTensor(string value, string device_name) : base(value)
{
- EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle);
+ EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle);
Resolve();
}
public EagerTensor(byte[] value, string device_name, TF_DataType dtype) : base(value, dType: dtype)
{
- EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle);
+ EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle);
Resolve();
}
public EagerTensor(string[] value, string device_name) : base(value)
{
- EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle);
+ EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle);
Resolve();
}
public EagerTensor(NDArray value, string device_name) : base(value)
{
- EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.status.Handle);
+ EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle);
Resolve();
}
@@ -49,7 +49,7 @@ namespace Tensorflow.Eager
_id = ops.uid();
if (_handle == IntPtr.Zero)
- _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.status.Handle);
+ _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle);
//print($"new Tensor {Id} {_handle.ToString("x16")}");
//print($"new TensorHandle {Id} {EagerTensorHandle.ToString("x16")}");
diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.cs
index bddd8188..cf66ca48 100644
--- a/src/TensorFlowNET.Core/Eager/EagerTensor.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerTensor.cs
@@ -13,24 +13,24 @@ namespace Tensorflow.Eager
get
{
using var _ = EagerTensorHandle.Lease();
- return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.status.Handle));
+ return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.Status.Handle));
}
}
- public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.status.Handle);
+ public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.Status.Handle);
public static int GetRank(IntPtr handle)
{
var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);
- return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.status.Handle);
+ return c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.Status.Handle);
}
public static int[] GetDims(IntPtr handle)
{
var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);
- var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.status.Handle)];
+ var dims = new int[c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, tf.Status.Handle)];
for (int i = 0; i < dims.Length; i++)
- dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, tf.status.Handle);
+ dims[i] = c_api.TFE_TensorHandleDim(tfe_tensor_handle, i, tf.Status.Handle);
return dims;
}
}
diff --git a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
index e1dc1192..4a45b2df 100644
--- a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
+++ b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
@@ -1,6 +1,7 @@
using System;
using System.Collections.Generic;
using System.Text;
+using Tensorflow.Contexts;
namespace Tensorflow.Eager
{
diff --git a/src/TensorFlowNET.Core/Eager/IEagerRunner.cs b/src/TensorFlowNET.Core/Eager/IEagerRunner.cs
index 0e0deb3c..a752bf9e 100644
--- a/src/TensorFlowNET.Core/Eager/IEagerRunner.cs
+++ b/src/TensorFlowNET.Core/Eager/IEagerRunner.cs
@@ -1,27 +1,37 @@
using System;
using System.Collections.Generic;
using System.Text;
+using Tensorflow.Contexts;
using Tensorflow.Gradients;
namespace Tensorflow.Eager
{
public interface IEagerRunner
{
- public Tensor[] TFE_FastPathExecute(Context ctx,
+ Tensor[] Execute(Context ctx, string op_name,
+ int num_outputs,
+ Tensor[] inputs, object[] attrs,
+ string name = null);
+
+ (TF_DataType, Tensor[]) ArgsToMatchingEager(Context ctx,
+ TF_DataType default_dtype = TF_DataType.DtInvalid,
+ object[] args = null);
+
+ Tensor[] TFE_FastPathExecute(Context ctx,
string device_name,
string opName,
string name,
Action callbacks,
params object[] args);
- public Tensor[] TFE_Execute(Context ctx,
+ Tensor[] TFE_Execute(Context ctx,
string device_name,
string op_name,
Tensor[] inputs,
object[] attrs,
int num_outputs);
- public Tensor[] TFE_TapeGradient(ITape tape,
+ Tensor[] TFE_TapeGradient(ITape tape,
Tensor[] target,
Tensor[] sources,
Tensor[] output_gradients);
diff --git a/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs b/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs
index 0ff9738c..f4f0a62f 100644
--- a/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs
+++ b/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs
@@ -18,9 +18,9 @@ namespace Tensorflow.Framework.Models
protected string _name;
public string name => _name;
- public DenseSpec(int[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
+ public DenseSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
- _shape = new TensorShape(shape);
+ _shape = shape;
_dtype = dtype;
_name = name;
}
diff --git a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
index 4ea66082..bb5874ed 100644
--- a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
+++ b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
@@ -7,7 +7,7 @@ namespace Tensorflow.Framework.Models
{
public class TensorSpec : DenseSpec
{
- public TensorSpec(int[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) :
+ public TensorSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) :
base(shape, dtype, name)
{
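
The constructor now takes a `TensorShape`; a small sketch of both call styles, assuming the existing implicit `int[]` → `TensorShape` conversion keeps old call sites compiling:

```csharp
using Tensorflow;
using Tensorflow.Framework.Models;

class TensorSpecExample
{
    static void Main()
    {
        // Explicit TensorShape...
        var images = new TensorSpec(new TensorShape(-1, 28, 28), TF_DataType.TF_FLOAT, name: "images");

        // ...or an int[] that converts implicitly (assumed conversion operator).
        var labels = new TensorSpec(new[] { -1, 10 }, TF_DataType.TF_FLOAT, name: "labels");
    }
}
```
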
diff --git a/src/TensorFlowNET.Core/Gradients/GradientTape.cs b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
index c47e8af8..a8c0d8fd 100644
--- a/src/TensorFlowNET.Core/Gradients/GradientTape.cs
+++ b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
@@ -37,14 +37,14 @@ namespace Tensorflow.Gradients
{
_persistent = persistent;
_watch_accessed_variables = watch_accessed_variables;
- _created_eagerly = tf.context.executing_eagerly();
+ _created_eagerly = tf.Context.executing_eagerly();
_recording = false;
- _created_eagerly = tf.context.executing_eagerly();
+ _created_eagerly = tf.Context.executing_eagerly();
// Enters a context inside which operations are recorded on this tape.
if (_created_eagerly)
{
- tf.context.ensure_initialized();
- tf.context.start_step();
+ tf.Context.ensure_initialized();
+ tf.Context.start_step();
}
_push_tape();
}
@@ -156,7 +156,7 @@ namespace Tensorflow.Gradients
_pop_tape();
if (_created_eagerly)
- tf.context.end_step();
+ tf.Context.end_step();
}
}
}
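
The tape still brackets the recorded region the same way, only routed through `tf.Context` now; a minimal end-to-end sketch, assuming the usual `GradientTape` members (`watch`, `gradient`, `Dispose`):

```csharp
using static Tensorflow.Binding;

class TapeExample
{
    static void Main()
    {
        var x = tf.constant(3.0f);

        // When created eagerly, the tape now calls tf.Context.start_step()
        // in its constructor and tf.Context.end_step() on dispose.
        using var tape = tf.GradientTape();
        tape.watch(x);
        var y = tf.multiply(x, x);

        var dy_dx = tape.gradient(y, x);
        print(dy_dx); // d(x*x)/dx at x = 3 -> 6
    }
}
```
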
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs
index 0d6afbc9..46f44eb9 100644
--- a/src/TensorFlowNET.Core/Gradients/math_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs
@@ -515,7 +515,7 @@ namespace Tensorflow.Gradients
var rank = input_0_shape.Length;
if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data()))
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
// should add ones_rank_cache
var new_shape = constant_op.constant(range(0, rank).Select(x => 1).ToArray(), dtype: TF_DataType.TF_INT32);
@@ -534,7 +534,7 @@ namespace Tensorflow.Gradients
input_shape = array_ops.shape(op.inputs[0]);
return new Tensor[] { gen_array_ops.tile(grad, input_shape), null };
}
- else if (!input_0_shape.Contains(-1) && !tf.context.executing_eagerly())
+ else if (!input_0_shape.Contains(-1) && !tf.Context.executing_eagerly())
{
throw new NotImplementedException("");
}
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
index b119745c..4db8ab2f 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs
@@ -82,7 +82,7 @@ namespace Tensorflow
///
public _ControlDependenciesController control_dependencies(object[] control_inputs)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
return new _ControlDependenciesController(this, null);
if (control_inputs == null)
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs
index 12dc66fb..6c404276 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.cs
@@ -518,7 +518,7 @@ namespace Tensorflow
public TensorShape GetTensorShape(TF_Output output)
{
- var status = tf.status;
+ var status = tf.Status;
var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status.Handle);
status.Check();
diff --git a/src/TensorFlowNET.Core/Keras/Activations.cs b/src/TensorFlowNET.Core/Keras/Activations.cs
new file mode 100644
index 00000000..77a83fbc
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/Activations.cs
@@ -0,0 +1,17 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using static Tensorflow.Binding;
+
+namespace Tensorflow.Keras
+{
+ public delegate Tensor Activation(Tensor x);
+
+ public class Activations
+ {
+ /// <summary>
+ /// Linear activation function (pass-through).
+ /// </summary>
+ public Activation Linear = x => x;
+ }
+}
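
`Activation` is now a plain delegate, so any `Tensor -> Tensor` lambda can be passed where an `IActivation` instance used to be required; a short sketch (relies on `tf.nn.relu` from the existing binding):

```csharp
using Tensorflow.Keras;
using static Tensorflow.Binding;

class ActivationExample
{
    static void Main()
    {
        // The built-in pass-through delegate...
        Activation linear = tf.keras.activations.Linear;

        // ...or any custom Tensor -> Tensor lambda.
        Activation relu = x => tf.nn.relu(x);

        var layer = tf.keras.layers.Dense(10, activation: relu);
    }
}
```
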
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DenseArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DenseArgs.cs
index ef05f929..e79a7c25 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DenseArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DenseArgs.cs
@@ -16,7 +16,7 @@ namespace Tensorflow.Keras.ArgsDefinition
/// <summary>
/// Activation function to use.
/// </summary>
- public IActivation Activation { get; set; }
+ public Activation Activation { get; set; }
///
/// Whether the layer uses a bias vector.
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/InputLayerArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/InputLayerArgs.cs
new file mode 100644
index 00000000..f02a42b1
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/InputLayerArgs.cs
@@ -0,0 +1,13 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class InputLayerArgs : LayerArgs
+ {
+ public Tensor InputTensor { get; set; }
+ public bool Sparse { get; set; }
+ public bool Ragged { get; set; }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs
index 8c7f6597..aaf89a0c 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs
@@ -17,7 +17,7 @@ namespace Tensorflow.Keras.ArgsDefinition
/// <summary>
/// Only applicable to input layers.
/// </summary>
- public TF_DataType DType { get; set; }
+ public TF_DataType DType { get; set; } = TF_DataType.TF_FLOAT;
///
/// Whether the `call` method can be used to build a TF graph without issues.
@@ -36,6 +36,8 @@ namespace Tensorflow.Keras.ArgsDefinition
///
public TensorShape BatchInputShape { get; set; }
+ public int BatchSize { get; set; } = -1;
+
/// <summary>
/// Initial weight values.
/// </summary>
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/NodeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/NodeArgs.cs
new file mode 100644
index 00000000..5e38da99
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/NodeArgs.cs
@@ -0,0 +1,17 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Keras.Engine;
+using Tensorflow.Keras.Layers;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class NodeArgs
+ {
+ public Layer[] InboundLayers { get; set; }
+ public int[] NodeIndices { get; set; }
+ public int[] TensorIndices { get; set; }
+ public Tensor[] InputTensors { get; set; }
+ public Tensor[] Outputs { get; set; }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/backend.cs b/src/TensorFlowNET.Core/Keras/BackendImpl.cs
similarity index 68%
rename from src/TensorFlowNET.Core/Keras/backend.cs
rename to src/TensorFlowNET.Core/Keras/BackendImpl.cs
index 704de00e..84b244a2 100644
--- a/src/TensorFlowNET.Core/Keras/backend.cs
+++ b/src/TensorFlowNET.Core/Keras/BackendImpl.cs
@@ -20,20 +20,21 @@ using static Tensorflow.Binding;
namespace Tensorflow.Keras
{
- public class backend : BackendBase
+ public class BackendImpl : BackendBase
{
/* ---------------------------------------- KERAS BACKEND NATIVE OBJECTS ---------------------------------------- */
- public static Func py_sum = sum;
- public static Func py_all = all;
+ public Func py_sum = sum;
+ public Func py_all = all;
//Func py_any = any;
//Func> py_slice = slice;
- public static Session _SESSION = ops.get_default_session();
- public static Graph _GRAPH = null;
- public static Dictionary _GRAPH_LEARNING_PHASES;
+ public Session _SESSION => ops.get_default_session();
+
+ public Graph _GRAPH;
+ public Dictionary _GRAPH_LEARNING_PHASES;
//Dictionary> PER_GRAPH_LAYER_NAME_UIDS;
- public static bool _MANUAL_VAR_INIT = false;
- public static List _LOCAL_DEVICES = null;
+ public bool _MANUAL_VAR_INIT = false;
+ public List _LOCAL_DEVICES = null;
/* -------------------------------------- KERAS BACKEND NATIVE OBJECTS END -------------------------------------- */
///
@@ -41,23 +42,28 @@ namespace Tensorflow.Keras
/// for various layer names in each graph.
/// Allows to give unique autogenerated names to layers, in a graph-specific way.
///
- public static Dictionary> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary>();
- public static Dictionary _GRAPH_VARIABLES = new Dictionary();
- public static Dictionary _GRAPH_TF_OPTIMIZERS = new Dictionary();
+ public Dictionary> PER_GRAPH_LAYER_NAME_UIDS = new Dictionary>();
+ public Dictionary _GRAPH_VARIABLES = new Dictionary();
+ public Dictionary _GRAPH_TF_OPTIMIZERS = new Dictionary();
+
+ public _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph();
- public static _DummyEagerGraph _DUMMY_EAGER_GRAPH = new _DummyEagerGraph();
+ public BackendImpl()
+ {
+ }
- public static void track_variable(IVariableV1 v)
+ public void track_variable(IVariableV1 v)
{
var graph = v.Graph;
_GRAPH_VARIABLES[graph.graph_key] = v;
}
- public static Tensor placeholder(int[] shape = null,
+ public Tensor placeholder(TensorShape shape = null,
int ndim = -1,
TF_DataType dtype = TF_DataType.DtInvalid,
bool sparse = false,
- string name = null)
+ string name = null,
+ bool ragged = false)
{
if (sparse)
{
@@ -65,16 +71,16 @@ namespace Tensorflow.Keras
}
else
{
- return gen_array_ops.placeholder(dtype: dtype, shape: new TensorShape(shape), name: name);
+ return array_ops.placeholder(dtype: dtype, shape: shape, name: name);
}
}
- public static Graph get_graph()
+ public Graph get_graph()
{
return ops.get_default_graph();
}
- public static int get_uid(string prefix, string @namespace = "")
+ public int get_uid(string prefix, string @namespace = "")
{
var graph = tf.get_default_graph();
if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph))
@@ -83,7 +89,7 @@ namespace Tensorflow.Keras
return PER_GRAPH_LAYER_NAME_UIDS[graph][(@namespace, prefix)];
}
- public static int get_uid((string, string) name)
+ public int get_uid((string, string) name)
{
var graph = tf.get_default_graph();
if (!PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph))
@@ -92,21 +98,21 @@ namespace Tensorflow.Keras
return PER_GRAPH_LAYER_NAME_UIDS[graph][name];
}
- public static void reset_uids() => PER_GRAPH_LAYER_NAME_UIDS = new Dictionary>();
- public static void clear_session()
+ public void reset_uids() => PER_GRAPH_LAYER_NAME_UIDS = new Dictionary>();
+ public void clear_session()
{
ops.reset_default_graph();
reset_uids();
- _SESSION = null;
+ ops.set_default_session(tf.Session(ops.get_default_graph()));
var phase = tf.placeholder_with_default(false, new int[] { }, name: "keras_learning_phase");
_GRAPH_LEARNING_PHASES = new Dictionary();
_GRAPH_LEARNING_PHASES[tf.get_default_graph()] = 0;
}
- public static void manual_variable_initialization(bool value)
+ public void manual_variable_initialization(bool value)
{
_MANUAL_VAR_INIT = value;
}
- public static GraphLearningPhase learning_phase()
+ public GraphLearningPhase learning_phase()
{
var graph = tf.get_default_graph();
if (_GRAPH_LEARNING_PHASES.ContainsKey(graph))
@@ -116,7 +122,7 @@ namespace Tensorflow.Keras
}
return _GRAPH_LEARNING_PHASES[graph];
}
- public static void set_learning_phase(bool value)
+ public void set_learning_phase(bool value)
{
_GRAPH_LEARNING_PHASES[tf.get_default_graph()] = (GraphLearningPhase)((value) ? 1 : 0);
}
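
With the backend turned into an instance (`BackendImpl`), its helpers are reached through `tf.keras.backend`; a brief sketch:

```csharp
using static Tensorflow.Binding;

class BackendExample
{
    static void Main()
    {
        // Unique per-graph layer ids, previously the static backend.get_uid(...).
        var uid = tf.keras.backend.get_uid("dense");

        // Resets the default graph and re-creates the default session
        // (the old code nulled _SESSION instead).
        tf.keras.backend.clear_session();
    }
}
```
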
diff --git a/src/TensorFlowNET.Core/Keras/Engine/ILayer.cs b/src/TensorFlowNET.Core/Keras/Engine/ILayer.cs
deleted file mode 100644
index 0b1f422d..00000000
--- a/src/TensorFlowNET.Core/Keras/Engine/ILayer.cs
+++ /dev/null
@@ -1,15 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Text;
-
-namespace Tensorflow.Keras.Engine
-{
- ///
- /// A layer is a callable object that takes as input one or more tensors and
- /// that outputs one or more tensors.
- ///
- public interface ILayer
- {
- Tensor Apply(Tensor inputs, bool is_training = false);
- }
-}
diff --git a/src/TensorFlowNET.Core/Keras/Engine/Layer.cs b/src/TensorFlowNET.Core/Keras/Engine/Layer.cs
index 349d7908..7ddb79c7 100644
--- a/src/TensorFlowNET.Core/Keras/Engine/Layer.cs
+++ b/src/TensorFlowNET.Core/Keras/Engine/Layer.cs
@@ -31,20 +31,22 @@ namespace Tensorflow.Keras.Engine
/// A layer is a class implementing common neural networks operations, such
/// as convolution, batch norm, etc. These operations require managing weights,
/// losses, updates, and inter-layer connectivity.
- ///
- /// tensorflow\python\keras\engine\base_layer.py
///
- public class Layer : AutoTrackable, ILayer
+ public abstract class Layer : AutoTrackable
{
- protected LayerArgs _args;
+ /// <summary>
+ /// Arguments used to initialize the layer.
+ /// </summary>
+ LayerArgs args;
///
/// Indicates whether `build` needs to be called upon layer call, to create
/// the layer's weights.
///
protected bool built;
- protected bool trainable;
- public TF_DataType _dtype;
+ public bool Trainable => args.Trainable;
+ public TF_DataType DType => args.DType;
+
///
/// A stateful layer is a layer whose updates are run during inference too,
/// for instance stateful RNNs.
@@ -53,53 +55,53 @@ namespace Tensorflow.Keras.Engine
/// <summary>
/// Provides information about which inputs are compatible with the layer.
/// </summary>
- protected InputSpec input_spec;
- protected bool supports_masking;
- protected List _trainable_weights;
- public List trainable_variables => _trainable_weights;
- protected List _non_trainable_weights;
- private string _name;
- public string name => _name;
- protected string _base_name;
- protected bool _compute_previous_mask;
- protected List _updates;
- public int[] _batch_input_shape;
-
- private List _inbound_nodes;
- public List inbound_nodes => _inbound_nodes;
-
-#pragma warning disable CS0649 // Field 'Layer._outbound_nodes' is never assigned to, and will always have its default value null
- private List _outbound_nodes;
-#pragma warning restore CS0649 // Field 'Layer._outbound_nodes' is never assigned to, and will always have its default value null
- public List outbound_nodes => _outbound_nodes;
-
-#pragma warning disable CS0169 // The field 'Layer._initial_weights' is never used
- float _initial_weights;
-#pragma warning restore CS0169 // The field 'Layer._initial_weights' is never used
-
- ThreadLocal _call_context;
- public CallContext CallContext => _call_context.Value;
+ protected InputSpec inputSpec;
+ public bool SupportsMasking { get; set; }
+ protected List<IVariableV1> trainableWeights;
+ public List<IVariableV1> TrainableVariables => trainableWeights;
+ protected List<IVariableV1> nonTrainableWeights;
+
+ string name;
+ public string Name => name;
+
+ protected string baseName;
+ protected bool computePreviousMask;
+ protected List<Operation> updates;
+ public TensorShape BatchInputShape => args.BatchInputShape;
+
+ List<Node> inboundNodes;
+ public List<Node> InboundNodes => inboundNodes;
+
+ List<Node> outboundNodes;
+ public List<Node> OutboundNodes => outboundNodes;
+
+ ThreadLocal<CallContext> callContext;
+ public CallContext CallContext => callContext.Value;
public Layer(LayerArgs args)
{
- _args = args;
+ this.args = args;
// A stateful layer is a layer whose updates are run during inference too,
// for instance stateful RNNs.
stateful = false;
// Indicates whether `build` needs to be called upon layer call, to create
// the layer's weights.
built = false;
- this.supports_masking = false;
+ this.SupportsMasking = false;
_init_set_name(name);
- _trainable_weights = new List();
- _non_trainable_weights = new List();
- _compute_previous_mask = false;
- _updates = new List();
+ trainableWeights = new List<IVariableV1>();
+ nonTrainableWeights = new List<IVariableV1>();
+ computePreviousMask = false;
+ updates = new List<Operation>();
+
+ inboundNodes = new List<Node>();
// Manage input shape information if passed.
-
- _inbound_nodes = new List();
+ if(args.BatchInputShape == null && args.InputShape != null)
+ {
+ args.BatchInputShape = new int[] { args.BatchSize }.Concat(args.InputShape.dims).ToArray();
+ }
}
///
@@ -108,39 +110,37 @@ namespace Tensorflow.Keras.Engine
///
///
///
- public Tensor Apply(Tensor input, bool is_training = false)
+ public Tensor Apply(Tensor[] inputs, bool is_training = false)
{
- var input_list = new Tensor[] { input };
-
- if (_call_context == null)
- _call_context = new ThreadLocal()
- {
- Value = new CallContext()
- };
+ callContext = callContext ?? new ThreadLocal<CallContext>()
+ {
+ Value = new CallContext()
+ };
using var ctxManager = CallContext.enter();
- string name_scope = "";
- if (tf.context.executing_eagerly())
+ string nameScope = "";
+ if (tf.Context.executing_eagerly())
{
- name_scope = _name;
+ nameScope = name;
}
else
{
throw new NotImplementedException("");
}
- tf_with(ops.name_scope(name_scope), scope =>
+ tf_with(ops.name_scope(nameScope), scope =>
{
if (!built)
- _maybe_build(input);
+ MaybeBuild(inputs);
- call(input, is_training: is_training);
+ call(inputs, is_training: is_training);
});
throw new NotImplementedException("");
}
+ [Obsolete("User Apply()")]
public Tensor[] __call__(Tensor[] inputs,
Tensor training = null,
Tensor state = null,
@@ -173,14 +173,14 @@ namespace Tensorflow.Keras.Engine
{
// Symbolic execution on symbolic tensors. We will attempt to build
// the corresponding TF subgraph inside `backend.get_graph()`
- var graph = backend.get_graph().as_default();
+ var graph = tf.keras.backend.get_graph().as_default();
tf_with(ops.name_scope(_name_scope()), delegate
{
// Build layer if applicable (if the `build` method has been
// overridden).
- _maybe_build(inputs[0]);
+ MaybeBuild(inputs);
- outputs = call(inputs[0],
+ outputs = call(inputs,
// training: training,
state: state);
@@ -217,25 +217,25 @@ namespace Tensorflow.Keras.Engine
return null;
}
- protected virtual Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected virtual Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
throw new NotImplementedException("");
}
protected virtual string _name_scope()
{
- return name;
+ return Name;
}
- protected void _maybe_build(Tensor input)
+ protected void MaybeBuild(Tensor[] inputs)
{
// Check input assumptions set before layer building, e.g. input rank.
if (built)
return;
- if (_dtype == TF_DataType.DtInvalid)
- _dtype = input.dtype;
+ if (DType == TF_DataType.DtInvalid)
+ args.DType = inputs[0].dtype;
- var input_shapes = input.TensorShape;
+ var input_shapes = inputs[0].TensorShape;
build(input_shapes);
built = true;
}
@@ -246,7 +246,7 @@ namespace Tensorflow.Keras.Engine
}
protected virtual IVariableV1 add_weight(string name,
- int[] shape,
+ TensorShape shape,
TF_DataType dtype = TF_DataType.DtInvalid,
IInitializer initializer = null,
bool? trainable = null,
@@ -267,10 +267,10 @@ namespace Tensorflow.Keras.Engine
else if (dtype.is_integer())
initializer = tf.zeros_initializer;
else
- throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.name}");
+ throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.Name}");
}
- var variable = _add_variable_with_custom_getter(new VariableArgs
+ var args = new VariableArgs
{
Name = name,
Shape = shape,
@@ -279,13 +279,14 @@ namespace Tensorflow.Keras.Engine
Overwrite = true,
Initializer = initializer,
Trainable = trainable.Value
- });
+ };
+ var variable = _add_variable_with_custom_getter(args);
//backend.track_variable(variable);
if (trainable == true)
- _trainable_weights.Add(variable);
+ trainableWeights.Add(variable);
else
- _non_trainable_weights.Add(variable);
+ nonTrainableWeights.Add(variable);
return variable;
}
@@ -293,17 +294,16 @@ namespace Tensorflow.Keras.Engine
protected virtual void add_update(Tensor[] updates, bool inputs = false)
{
var updates_op = updates.Select(x => x.op).ToArray();
- _updates.AddRange(updates_op);
+ this.updates.AddRange(updates_op);
}
// Determine layer name (non-unique).
protected virtual void _init_set_name(string name, bool zero_based = true)
{
var base_name = name;
- _name = name;
+ this.name = name;
if (name == null)
- (_name, base_name) = _make_unique_name();
- _base_name = base_name;
+ (this.name, baseName) = _make_unique_name();
}
protected virtual (string, string) _make_unique_name()
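
For orientation, a hedged sketch of a concrete layer against the reworked abstract base, modeled on the `Dense` implementation later in this diff; `MyBias` itself is hypothetical:

```csharp
using System.Linq;
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;
using static Tensorflow.Binding;

// Hypothetical layer that only adds a trainable bias, showing the new
// Tensor[]-based call signature and TensorShape-based add_weight.
public class MyBias : Layer
{
    IVariableV1 bias;

    public MyBias(LayerArgs args) : base(args) { }

    protected override void build(TensorShape input_shape)
    {
        var last_dim = input_shape.dims.Last();
        bias = add_weight("bias",
            shape: new TensorShape(last_dim),
            dtype: DType,
            trainable: true);
        built = true;
    }

    protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
    {
        // Same pattern Dense uses for its bias term.
        var outputs = tf.nn.bias_add(inputs[0], bias);
        return new[] { outputs };
    }
}
```
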
diff --git a/src/TensorFlowNET.Core/Keras/Engine/Node.cs b/src/TensorFlowNET.Core/Keras/Engine/Node.cs
new file mode 100644
index 00000000..5ada8791
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/Engine/Node.cs
@@ -0,0 +1,61 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using System.Linq;
+using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Keras.Layers;
+
+namespace Tensorflow.Keras.Engine
+{
+ ///
+ /// A `Node` describes the connectivity between two layers.
+ ///
+ /// Each time a layer is connected to some new input,
+ /// a node is added to `layer._inbound_nodes`.
+ /// Each time the output of a layer is used by another layer,
+ /// a node is added to `layer._outbound_nodes`.
+ ///
+ public class Node
+ {
+ NodeArgs args;
+
+ public int[] node_indices;
+ public int[] tensor_indices;
+ public Tensor[] input_tensors;
+ public Tensor[] Outputs => args.Outputs;
+ public TensorShape[] input_shapes;
+ public TensorShape[] output_shapes;
+ List kerasInputs;
+
+ public Node(InputLayer layer, NodeArgs args)
+ {
+ this.args = args;
+
+ kerasInputs = new List();
+
+ // Wire up Node to Layers.
+ layer.InboundNodes.Add(this);
+ foreach (var input in kerasInputs)
+ {
+ if (input != null)
+ input.OutboundNodes.Add(this);
+ }
+
+ // Set metadata on outputs.
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/Engine/Sequential.cs b/src/TensorFlowNET.Core/Keras/Engine/Sequential.cs
index ff9392c8..3883b2c5 100644
--- a/src/TensorFlowNET.Core/Keras/Engine/Sequential.cs
+++ b/src/TensorFlowNET.Core/Keras/Engine/Sequential.cs
@@ -16,6 +16,7 @@
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;
+using static Tensorflow.Binding;
namespace Tensorflow.Keras.Engine
{
@@ -28,11 +29,21 @@ namespace Tensorflow.Keras.Engine
Tensor[] outputs;
#pragma warning restore CS0169 // The field 'Sequential.outputs' is never used
- public Sequential(string name = null)
+ bool computeOutputAndMaskJointly;
+ bool autoTrackSubLayers;
+ TensorShape inferredInputShape;
+ bool hasExplicitInputShape;
+ TF_DataType inputDType;
+ Layer[] layers;
+
+ public Sequential(Layer[] layers = null, string name = null)
: base(new ModelArgs { Name = name})
{
- supports_masking = true;
- // _compute_output_and_mask_jointly = true;
+ this.layers = layers ?? new Layer[0];
+ SupportsMasking = true;
+ computeOutputAndMaskJointly = true;
+ autoTrackSubLayers = false;
+ hasExplicitInputShape = false;
}
public void __enter__()
@@ -48,27 +59,26 @@ namespace Tensorflow.Keras.Engine
{
built = false;
var set_inputs = false;
- //if(_layers.Count == 0)
+ if(layers.Length == 0)
{
if(layer is InputLayer)
{
-
+ set_inputs = true;
}
else
{
- var (batch_shape, dtype) = (layer._batch_input_shape, layer._dtype);
- if (batch_shape != null)
+ if (layer.BatchInputShape != null)
{
// Instantiate an input layer.
- var x = keras.layers.Input(
- batch_shape: batch_shape,
- dtype: dtype,
- name: layer.name + "_input");
+ var x = tf.keras.Input(
+ batch_shape: layer.BatchInputShape,
+ dtype: layer.DType,
+ name: layer.Name + "_input");
// This will build the current layer
// and create the node connecting the current layer
// to the input layer we just created.
- layer.__call__(x);
+ layer.Apply(x);
set_inputs = true;
}
}
diff --git a/src/TensorFlowNET.Core/Keras/KerasApi.cs b/src/TensorFlowNET.Core/Keras/KerasApi.cs
index f8dcadff..982ec023 100644
--- a/src/TensorFlowNET.Core/Keras/KerasApi.cs
+++ b/src/TensorFlowNET.Core/Keras/KerasApi.cs
@@ -1,11 +1,12 @@
using System;
using System.Data;
+using System.Linq;
using Tensorflow.Keras;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Datasets;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Layers;
-using Tensorflow.Operations.Activation;
+using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -14,15 +15,56 @@ namespace Tensorflow
public KerasDataset datasets { get; } = new KerasDataset();
public Initializers initializers { get; } = new Initializers();
public Layers layers { get; } = new Layers();
+ public Activations activations { get; } = new Activations();
+
+ public BackendImpl backend { get; } = new BackendImpl();
+
+ public Sequential Sequential()
+ => new Sequential();
+
+ public Tensor[] Input(int[] batch_shape = null,
+ int batch_size = -1,
+ TF_DataType dtype = TF_DataType.DtInvalid,
+ string name = null,
+ bool sparse = false,
+ bool ragged = false,
+ Tensor tensor = null)
+ {
+ var args = new InputLayerArgs
+ {
+ Name = name,
+ BatchInputShape = batch_shape,
+ BatchSize = batch_size,
+ DType = dtype,
+ Sparse = sparse,
+ Ragged = ragged,
+ InputTensor = tensor
+ };
+
+ var layer = new InputLayer(args);
+
+ return layer.InboundNodes[0].Outputs;
+ }
+
+ public static Embedding Embedding(int input_dim,
+ int output_dim,
+ IInitializer embeddings_initializer = null,
+ bool mask_zero = false)
+ => new Embedding(input_dim,
+ output_dim,
+ embeddings_initializer,
+ mask_zero);
public class Layers
{
- public ILayer Dense(int units,
- IActivation activation = null)
+ public Layer Dense(int units,
+ Activation activation = null,
+ TensorShape input_shape = null)
=> new Dense(new DenseArgs
{
Units = units,
- Activation = activation
+ Activation = activation ?? tf.keras.activations.Linear,
+ InputShape = input_shape
});
}
}
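
Putting the new `KerasApi` surface together, a hedged sketch of the entry points this file now exposes (shapes and unit counts are arbitrary, and this is not an end-to-end training script):

```csharp
using Tensorflow;
using static Tensorflow.Binding;

class KerasApiExample
{
    static void Main()
    {
        // Symbolic inputs come back as the output tensors of the InputLayer's first node.
        var inputs = tf.keras.Input(batch_shape: new[] { -1, 784 });

        // Dense defaults to the linear (pass-through) activation.
        var dense = tf.keras.layers.Dense(10, input_shape: new TensorShape(784));

        // An empty Sequential container.
        var model = tf.keras.Sequential();
    }
}
```
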
diff --git a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
index ef71cd37..23992b56 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
@@ -80,7 +80,7 @@ namespace Tensorflow.Keras.Layers
this.moving_variance_initializer = moving_variance_initializer;
this.renorm = renorm;
this.fused = true;
- this.supports_masking = true;
+ this.SupportsMasking = true;
this._bessels_correction_test_only = true;
}
@@ -95,7 +95,7 @@ namespace Tensorflow.Keras.Layers
if (Enumerable.SequenceEqual(axis, new int[] { 3 }))
_data_format = "NHWC";
- var param_dtype = _dtype == TF_DataType.DtInvalid ? TF_DataType.TF_FLOAT : _dtype;
+ var param_dtype = DType == TF_DataType.DtInvalid ? TF_DataType.TF_FLOAT : DType;
var param_shape = new int[] { input_shape.dims[axis[0]] };
if (scale)
@@ -143,14 +143,14 @@ namespace Tensorflow.Keras.Layers
built = true;
}
- protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
Tensor outputs = null;
if (fused)
{
Tensor training = tf.convert_to_tensor(is_training);
- outputs = _fused_batch_norm(inputs, training: training);
+ outputs = _fused_batch_norm(inputs[0], training: training);
return new[] { outputs, outputs };
}
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Conv.cs b/src/TensorFlowNET.Core/Keras/Layers/Conv.cs
index cc04bc0f..a1d48be1 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Conv.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Conv.cs
@@ -65,7 +65,7 @@ namespace Tensorflow.Keras.Layers
this.use_bias = use_bias;
this.kernel_initializer = kernel_initializer;
this.bias_initializer = bias_initializer;
- input_spec = new InputSpec(ndim: rank + 2);
+ inputSpec = new InputSpec(ndim: rank + 2);
}
protected override void build(TensorShape input_shape)
@@ -79,17 +79,17 @@ namespace Tensorflow.Keras.Layers
shape: kernel_shape,
initializer: kernel_initializer,
trainable: true,
- dtype: _dtype);
+ dtype: DType);
if (use_bias)
bias = (RefVariable)add_weight(name: "bias",
shape: new int[] { filters },
initializer: bias_initializer,
trainable: true,
- dtype: _dtype);
+ dtype: DType);
var axes = new Dictionary<int, int>();
axes.Add(-1, input_dim);
- input_spec = new InputSpec(ndim: rank + 2, axes: axes);
+ inputSpec = new InputSpec(ndim: rank + 2, axes: axes);
string op_padding;
if (padding == "causal")
@@ -108,9 +108,9 @@ namespace Tensorflow.Keras.Layers
built = true;
}
- protected override Tensor[] call(Tensor inputs, bool training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool training = false, Tensor state = null)
{
- var outputs = _convolution_op.__call__(inputs, kernel);
+ var outputs = _convolution_op.__call__(inputs[0], kernel);
if (use_bias)
{
if (data_format == "channels_first")
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Dense.cs b/src/TensorFlowNET.Core/Keras/Layers/Dense.cs
index 906747b8..90109c1e 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Dense.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Dense.cs
@@ -29,19 +29,16 @@ namespace Tensorflow.Keras.Layers
///
public class Dense : Layer
{
- protected int units;
- protected IActivation activation;
- protected bool use_bias;
- protected IInitializer kernel_initializer;
- protected IInitializer bias_initializer;
+ DenseArgs args;
protected IVariableV1 kernel;
protected IVariableV1 bias;
public Dense(DenseArgs args) :
base(args)
{
- this.supports_masking = true;
- this.input_spec = new InputSpec(min_ndim: 2);
+ this.args = args;
+ this.SupportsMasking = true;
+ this.inputSpec = new InputSpec(min_ndim: 2);
}
protected override void build(TensorShape input_shape)
@@ -49,41 +46,41 @@ namespace Tensorflow.Keras.Layers
var last_dim = input_shape.dims.Last();
var axes = new Dictionary<int, int>();
axes[-1] = last_dim;
- input_spec = new InputSpec(min_ndim: 2, axes: axes);
+ inputSpec = new InputSpec(min_ndim: 2, axes: axes);
kernel = add_weight(
"kernel",
- shape: new int[] { last_dim, units },
- initializer: kernel_initializer,
- dtype: _dtype,
+ shape: new TensorShape(last_dim, args.Units),
+ initializer: args.KernelInitializer,
+ dtype: DType,
trainable: true);
- if (use_bias)
+ if (args.UseBias)
bias = add_weight(
"bias",
- shape: new int[] { units },
- initializer: bias_initializer,
- dtype: _dtype,
+ shape: new TensorShape(args.Units),
+ initializer: args.BiasInitializer,
+ dtype: DType,
trainable: true);
built = true;
}
- protected override Tensor[] call(Tensor inputs, bool training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool training = false, Tensor state = null)
{
Tensor outputs = null;
- var rank = inputs.rank;
+ var rank = inputs[0].rank;
if(rank > 2)
{
throw new NotImplementedException("call rank > 2");
}
else
{
- outputs = gen_math_ops.mat_mul(inputs, kernel.Handle);
+ outputs = gen_math_ops.mat_mul(inputs[0], kernel.Handle);
}
- if (use_bias)
+ if (args.UseBias)
outputs = tf.nn.bias_add(outputs, bias);
- if (activation != null)
- outputs = activation.Activate(outputs);
+ //if (args.Activation != null)
+ //outputs = args.Activation.Activate(outputs);
return new[] { outputs, outputs };
}
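
Note: with the per-field configuration folded into `DenseArgs`, a `Dense` layer is built directly from the args object; `call` currently leaves the activation commented out, so outputs stay linear regardless of `args.Activation`. A sketch using only the `DenseArgs` members visible in this diff (the unit count is hypothetical):

```csharp
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;
using static Tensorflow.Binding;

var dense = new Dense(new DenseArgs
{
    Units = 128,                                // hypothetical width
    Activation = tf.keras.activations.Linear,   // default used by KerasApi's Layers.Dense
    UseBias = true
});
```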
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Embedding.cs b/src/TensorFlowNET.Core/Keras/Layers/Embedding.cs
index 628b5ef4..5080c425 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Embedding.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Embedding.cs
@@ -45,7 +45,7 @@ namespace Tensorflow.Keras.Layers
this.output_dim = output_dim;
this.embeddings_initializer = embeddings_initializer == null ? tf.uniform_initializer : embeddings_initializer;
this.mask_zero = mask_zero;
- supports_masking = mask_zero;
+ SupportsMasking = mask_zero;
this.input_length = input_length;
}
@@ -57,13 +57,13 @@ namespace Tensorflow.Keras.Layers
built = true;
}
- protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
- var dtype = inputs.dtype;
+ var dtype = inputs[0].dtype;
if (dtype != tf.int32 && dtype != tf.int64)
- inputs = math_ops.cast(inputs, tf.int32);
+ inputs[0] = math_ops.cast(inputs[0], tf.int32);
- var @out = embedding_ops.embedding_lookup(embeddings, inputs);
+ var @out = embedding_ops.embedding_lookup(embeddings, inputs[0]);
return new[] { @out, @out };
}
}
diff --git a/src/TensorFlowNET.Core/Keras/Layers/InputLayer.cs b/src/TensorFlowNET.Core/Keras/Layers/InputLayer.cs
index 66ba6625..02473904 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/InputLayer.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/InputLayer.cs
@@ -17,8 +17,10 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using Tensorflow.Framework.Models;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;
+using static Tensorflow.Binding;
namespace Tensorflow.Keras.Layers
{
@@ -27,82 +29,68 @@ namespace Tensorflow.Keras.Layers
///
public class InputLayer : Layer
{
- public bool sparse;
- public int? batch_size;
- public bool is_placeholder;
+ InputLayerArgs args;
+ bool isPlaceholder;
+ TensorSpec typeSpec;
- public InputLayer(int[] input_shape = null,
- int[] batch_input_shape = null,
- int? batch_size = null,
- TF_DataType dtype = TF_DataType.DtInvalid,
- string name = null,
- bool sparse = false,
- Tensor input_tensor = null) :
- base(new LayerArgs
- {
- DType = dtype, Name = name
- })
+ public InputLayer(InputLayerArgs args) :
+ base(args)
{
+ this.args = args;
built = true;
- this.sparse = sparse;
- this.batch_size = batch_size;
- this.supports_masking = true;
+ this.SupportsMasking = true;
- if(batch_input_shape != null)
+ if(BatchInputShape != null)
{
- batch_size = batch_input_shape[0];
- input_shape = batch_input_shape.Skip(1).ToArray();
+ args.BatchSize = BatchInputShape.dims[0];
+ args.InputShape = BatchInputShape.dims[1..];
}
// moved to base class
- if (string.IsNullOrEmpty(name))
+ if (string.IsNullOrEmpty(Name))
{
var prefix = "input";
- name = prefix + '_' + backend.get_uid(prefix);
+ args.Name = prefix + '_' + tf.keras.backend.get_uid(prefix);
}
-
- if (input_tensor == null)
+
+ if (args.InputTensor == null)
{
- if(input_shape != null)
+ if(args.InputShape != null)
{
- var dims = new List<int> { batch_size.HasValue ? batch_size.Value : -1 };
- dims.AddRange(input_shape);
- batch_input_shape = dims.ToArray();
+ args.BatchInputShape = new int[] { args.BatchSize }
+ .Concat(args.InputShape.dims)
+ .ToArray();
}
else
{
- batch_input_shape = null;
+ args.BatchInputShape = null;
}
- var graph = backend.get_graph().as_default();
-
// In graph mode, create a graph placeholder to call the layer on.
- if (sparse)
- {
- throw new NotImplementedException("InputLayer sparse is true");
- }
- else
- {
- input_tensor = backend.placeholder(
- shape: batch_input_shape,
- dtype: dtype,
- name: name);
- }
+ tf.Context.graph_mode();
+ args.InputTensor = tf.keras.backend.placeholder(
+ shape: BatchInputShape,
+ dtype: DType,
+ name: Name,
+ sparse: args.Sparse);
+ tf.Context.eager_mode();
- is_placeholder = true;
- _batch_input_shape = batch_input_shape;
+ isPlaceholder = true;
}
// Create an input node to add to self.outbound_node
// and set output_tensors' _keras_history.
// input_tensor._keras_history = base_layer.KerasHistory(self, 0, 0)
// input_tensor._keras_mask = None
- new Node(this,
- inbound_layers: new Layer[0],
- node_indices: new int[0],
- tensor_indices: new int[0],
- input_tensors: new Tensor[] { input_tensor },
- output_tensors: new Tensor[] { input_tensor });
+ new Node(this, new NodeArgs
+ {
+ InputTensors = new Tensor[] { args.InputTensor },
+ Outputs = new Tensor[] { args.InputTensor }
+ });
+
+ typeSpec = new TensorSpec(args.InputTensor.TensorShape,
+ dtype: args.InputTensor.dtype,
+ name: Name);
}
}
}
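
Note: the rewritten `InputLayer` is driven entirely by `InputLayerArgs` and creates its placeholder inside a temporary `graph_mode()`/`eager_mode()` toggle. A direct-construction sketch (shape and name are hypothetical; `KerasApi.Input` above is the normal entry point and does exactly this):

```csharp
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;

var layer = new InputLayer(new InputLayerArgs
{
    BatchInputShape = new TensorShape(-1, 28, 28),   // hypothetical shape
    DType = TF_DataType.TF_FLOAT,
    Name = "img"
});
var outputs = layer.InboundNodes[0].Outputs;         // same access pattern KerasApi.Input uses
```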
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Node.cs b/src/TensorFlowNET.Core/Keras/Layers/Node.cs
deleted file mode 100644
index 11862f06..00000000
--- a/src/TensorFlowNET.Core/Keras/Layers/Node.cs
+++ /dev/null
@@ -1,85 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System.Linq;
-using Tensorflow.Keras.Engine;
-
-namespace Tensorflow.Keras.Layers
-{
- /// <summary>
- /// A `Node` describes the connectivity between two layers.
- /// </summary>
- public class Node
- {
- public InputLayer outbound_layer;
- public Layer[] inbound_layers;
- public int[] node_indices;
- public int[] tensor_indices;
- public Tensor[] input_tensors;
- public Tensor[] output_tensors;
- public int[][] input_shapes;
- public int[][] output_shapes;
-
- /// <summary>
- ///
- /// </summary>
- /// <param name="outbound_layer">
- /// the layer that takes
- /// `input_tensors` and turns them into `output_tensors`
- /// (the node gets created when the `call`
- /// method of the layer was called).
- /// </param>
- /// <param name="inbound_layers">
- /// a list of layers, the same length as `input_tensors`,
- /// the layers from where `input_tensors` originate.
- /// </param>
- /// <param name="node_indices">
- /// a list of integers, the same length as `inbound_layers`.
- /// `node_indices[i]` is the origin node of `input_tensors[i]`
- /// (necessary since each inbound layer might have several nodes,
- /// e.g. if the layer is being shared with a different data stream).
- /// </param>
- /// <param name="tensor_indices"></param>
- /// <param name="input_tensors">list of input tensors.</param>
- /// <param name="output_tensors">list of output tensors.</param>
- public Node(InputLayer outbound_layer,
- Layer[] inbound_layers,
- int[] node_indices,
- int[] tensor_indices,
- Tensor[] input_tensors,
- Tensor[] output_tensors)
- {
- this.outbound_layer = outbound_layer;
- this.inbound_layers = inbound_layers;
- this.node_indices = node_indices;
- this.tensor_indices = tensor_indices;
- this.input_tensors = input_tensors;
- this.output_tensors = output_tensors;
-
- input_shapes = input_tensors.Select(x => x._shape_tuple()).ToArray();
- output_shapes = output_tensors.Select(x => x._shape_tuple()).ToArray();
-
- // Add nodes to all layers involved.
- foreach (var layer in inbound_layers)
- {
- if (layer != null)
- layer.outbound_nodes.Add(this);
- }
-
- outbound_layer.inbound_nodes.Add(this);
- }
- }
-}
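
Note: the positional `Node` constructor deleted here is replaced by the `NodeArgs`-based call shown in `InputLayer` above. A minimal sketch of the new form; only `InputTensors` and `Outputs` are exercised in this diff, so any other `NodeArgs` fields are assumptions, and the using directives assume `Node`/`NodeArgs` keep the old namespaces:

```csharp
using Tensorflow;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Layers;

InputLayer layer = null;       // stand-in: the layer creating the node
Tensor inputTensor = null;     // stand-in: a tensor produced for that layer

var node = new Node(layer, new NodeArgs
{
    InputTensors = new Tensor[] { inputTensor },
    Outputs = new Tensor[] { inputTensor }
});
```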
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs b/src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs
index 3c9d0a38..6ee054bf 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs
@@ -45,7 +45,7 @@ namespace Tensorflow.Keras.Layers
this.input_spec = new InputSpec(ndim: 4);
}
- protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
int[] pool_shape;
if (data_format == "channels_last")
@@ -60,7 +60,7 @@ namespace Tensorflow.Keras.Layers
}
var outputs = pool_function.Apply(
- inputs,
+ inputs[0],
ksize: pool_shape,
strides: strides,
padding: padding.ToUpper(),
diff --git a/src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs b/src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs
index a3667867..18325a49 100644
--- a/src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs
+++ b/src/TensorFlowNET.Core/Keras/Utils/base_layer_utils.cs
@@ -40,7 +40,7 @@ namespace Tensorflow.Keras.Utils
var variable_dtype = args.DType.as_base_dtype();
var v = tf.Variable(init_val,
- dtype: args.DType,
+ dtype: variable_dtype,
shape: args.Shape,
name: args.Name,
trainable: args.Trainable,
@@ -94,14 +94,14 @@ namespace Tensorflow.Keras.Utils
{
var graph = ops.get_default_graph();
Dictionary<(string, string), int> name_uid_map = null;
- if (backend.PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph))
+ if (tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS.ContainsKey(graph))
{
- name_uid_map = backend.PER_GRAPH_LAYER_NAME_UIDS[graph];
+ name_uid_map = tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS[graph];
}
else
{
name_uid_map = new Dictionary<(string, string), int>();
- backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map;
+ tf.keras.backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map;
}
return name_uid_map;
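
Note: layer-name bookkeeping (the per-graph UID map and `get_uid`) now lives on the shared `tf.keras.backend` instance instead of a static `backend` class. Sketch of the naming idiom `InputLayer` uses above:

```csharp
using static Tensorflow.Binding;

// Produces names like "input_1", "input_2", ... scoped per graph via tf.keras.backend.
var name = "input_" + tf.keras.backend.get_uid("input");
```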
diff --git a/src/TensorFlowNET.Core/Layers/Layer.cs b/src/TensorFlowNET.Core/Layers/Layer.cs
index 2d98d081..48003b94 100644
--- a/src/TensorFlowNET.Core/Layers/Layer.cs
+++ b/src/TensorFlowNET.Core/Layers/Layer.cs
@@ -49,8 +49,8 @@ namespace Tensorflow.Layers
this._reuse = _reuse;
// Avoid an incorrect lint error
- _trainable_weights = new List<IVariableV1>();
- _non_trainable_weights = new List<IVariableV1>();
+ trainableWeights = new List<IVariableV1>();
+ nonTrainableWeights = new List<IVariableV1>();
this.built = false;
_keras_style = false;
}
@@ -95,7 +95,7 @@ namespace Tensorflow.Layers
// Update global default collections.
- _add_elements_to_collection(_updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });
+ _add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });
return outputs;
}
@@ -202,7 +202,7 @@ namespace Tensorflow.Layers
}
else
{
- tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
+ tf_with(tf.variable_scope(scope, default_name: baseName), captured_scope =>
{
// convert variable_scope to VariableScope
_scope = captured_scope;
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
index b08fc78d..a0fbc007 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
@@ -40,7 +40,7 @@ namespace Tensorflow
IActivation activation = null, bool? reuse = null, string name = null,
TF_DataType dtype = TF_DataType.DtInvalid) : base(_reuse: reuse, name: name, dtype: dtype)
{
- input_spec = new InputSpec(ndim: 2);
+ inputSpec = new InputSpec(ndim: 2);
_num_units = num_units;
_forget_bias = forget_bias;
_state_is_tuple = state_is_tuple;
@@ -74,7 +74,7 @@ namespace Tensorflow
///
///
///
- protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
var one = constant_op.constant(1, dtype: dtypes.int32);
// Parameters of gates are concatenated into one multiply for efficiency.
@@ -87,7 +87,7 @@ namespace Tensorflow
// array_ops.split(value: state, num_or_size_splits: 2, axis: one);
throw new NotImplementedException("BasicLstmCell call");
}
- var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs, h }, 1), _kernel as RefVariable);
+ var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs[0], h }, 1), _kernel as RefVariable);
gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable);
// i = input_gate, j = new_input, f = forget_gate, o = output_gate
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
index 8ddd4599..de8e7b95 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
@@ -42,7 +42,7 @@ namespace Tensorflow
dtype: dtype)
{
// Inputs must be 2-dimensional.
- input_spec = new InputSpec(ndim: 2);
+ inputSpec = new InputSpec(ndim: 2);
_num_units = num_units;
if (activation == null)
@@ -67,10 +67,10 @@ namespace Tensorflow
built = true;
}
- protected override Tensor[] call(Tensor inputs, bool is_training = false, Tensor state = null)
+ protected override Tensor[] call(Tensor[] inputs, bool is_training = false, Tensor state = null)
{
// Most basic RNN: output = new_state = act(W * input + U * state + B).
- var concat = array_ops.concat(new[] { inputs, state }, 1);
+ var concat = array_ops.concat(new[] { inputs[0], state }, 1);
var gate_inputs = math_ops.matmul(concat, _kernel as RefVariable);
gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable);
var output = _activation(gate_inputs, null);
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
index 659247b0..ccc83864 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
@@ -42,7 +42,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d(Conv2dParams parameters)
{
- var _op = tf._op_def_lib._apply_op_helper("Conv2D", name: parameters.Name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("Conv2D", name: parameters.Name, args: new
{
input = parameters.Input,
filter = parameters.Filter,
@@ -64,7 +64,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d_backprop_filter(Conv2dParams parameters)
{
- var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new
{
input = parameters.Input,
filter_sizes = parameters.FilterSizes,
@@ -87,7 +87,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d_backprop_input(Conv2dParams parameters)
{
- var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
{
input_sizes = parameters.InputSizes,
filter = parameters.Filter,
@@ -111,7 +111,7 @@ namespace Tensorflow.Operations
if (data_format == null)
data_format = "NHWC";
- var _op = tf._op_def_lib._apply_op_helper("BiasAdd", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("BiasAdd", name: name, args: new
{
value,
bias,
@@ -128,7 +128,7 @@ namespace Tensorflow.Operations
if (data_format == null)
data_format = "NHWC";
- var _op = tf._op_def_lib._apply_op_helper("BiasAddGrad", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("BiasAddGrad", name: name, args: new
{
out_backprop,
data_format
@@ -154,7 +154,7 @@ namespace Tensorflow.Operations
///
public static Tensor elu(Tensor features, string name = "Elu")
{
- var op = tf._op_def_lib._apply_op_helper("Elu", name: name, args: new { features });
+ var op = tf.OpDefLib._apply_op_helper("Elu", name: name, args: new { features });
return op.output;
}
@@ -165,7 +165,7 @@ namespace Tensorflow.Operations
///
public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params)
{
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
{
y_backprop = @params.YBackprop,
x = @params.X,
@@ -181,7 +181,7 @@ namespace Tensorflow.Operations
public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params)
{
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new
{
y_backprop = @params.YBackprop,
x = @params.X,
@@ -206,7 +206,7 @@ namespace Tensorflow.Operations
bool is_training = true,
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("FusedBatchNorm", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, args: new
{
x,
scale,
@@ -231,7 +231,7 @@ namespace Tensorflow.Operations
bool is_training = true,
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV3", name: name, args: new
{
x,
scale,
@@ -259,7 +259,7 @@ namespace Tensorflow.Operations
public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1,
int alpha = 1, float beta = 0.5f, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LRN", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("LRN", name: name, args: new
{
input,
depth_radius,
@@ -273,7 +273,7 @@ namespace Tensorflow.Operations
public static Tensor log_softmax(Tensor logits, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LogSoftmax", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("LogSoftmax", name: name, args: new
{
logits
});
@@ -291,7 +291,7 @@ namespace Tensorflow.Operations
/// A `Tensor` of type `bool`.
public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("InTopKV2", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("InTopKV2", name: name, args: new
{
predictions,
targets,
@@ -303,7 +303,7 @@ namespace Tensorflow.Operations
public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("LeakyRelu", name: name, args: new
{
features,
alpha
@@ -319,7 +319,7 @@ namespace Tensorflow.Operations
string data_format = "NHWC",
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("MaxPool", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("MaxPool", name: name, args: new
{
input,
ksize,
@@ -334,7 +334,7 @@ namespace Tensorflow.Operations
public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding,
string data_format= "NHWC", string name= null)
{
- var _op = tf._op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("MaxPoolGrad", name: name, args: new
{
orig_input,
orig_output,
@@ -350,7 +350,7 @@ namespace Tensorflow.Operations
public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TopKV2", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("TopKV2", name: name, args: new
{
input,
k,
@@ -362,9 +362,9 @@ namespace Tensorflow.Operations
public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ReluGrad", name,
null,
gradients, features);
@@ -372,7 +372,7 @@ namespace Tensorflow.Operations
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("ReluGrad", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("ReluGrad", name: name, args: new
{
gradients,
features
@@ -383,7 +383,7 @@ namespace Tensorflow.Operations
public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("LeakyReluGrad", name: name, args: new
{
gradients,
features,
@@ -395,9 +395,9 @@ namespace Tensorflow.Operations
public static Tensor softmax(Tensor logits, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Softmax", name,
null,
logits);
@@ -405,7 +405,7 @@ namespace Tensorflow.Operations
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Softmax", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("Softmax", name: name, args: new
{
logits
});
@@ -422,7 +422,7 @@ namespace Tensorflow.Operations
///
public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new
{
features,
labels
@@ -460,7 +460,7 @@ namespace Tensorflow.Operations
///
public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
{
- var op = tf._op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
+ var op = tf.OpDefLib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
int _idx = 0;
var loss = op.outputs[_idx++];
var backprop = op.outputs[_idx++];
@@ -475,9 +475,9 @@ namespace Tensorflow.Operations
/// A `Tensor`. Has the same type as `features`.
public static Tensor relu(Tensor features, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Relu", name,
null,
features);
@@ -485,15 +485,15 @@ namespace Tensorflow.Operations
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Relu", name: name, args: new { features });
+ var _op = tf.OpDefLib._apply_op_helper("Relu", name: name, args: new { features });
return _op.outputs[0];
}
public static Tensor tanh(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Tanh", name,
null,
x);
@@ -501,7 +501,7 @@ namespace Tensorflow.Operations
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Tanh", name: name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Tanh", name: name, args: new { x });
return _op.outputs[0];
}
}
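
Note: every op binding in this file now follows the same two-path template — an eager fast path through `tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, ...)` and a graph fallback through `tf.OpDefLib._apply_op_helper(...)`. A sketch of that template for a hypothetical op (`MyOp`, its single input, and the container class are made up):

```csharp
using Tensorflow;
using static Tensorflow.Binding;

public static class my_ops   // hypothetical container mirroring gen_nn_ops
{
    public static Tensor my_op(Tensor x, string name = null)
    {
        if (tf.Context.executing_eagerly())
        {
            // Eager fast path, mirroring relu/tanh above.
            var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
                "MyOp", name,
                null,
                x);
            return results[0];
        }

        // Graph-mode fallback through the op definition library.
        var _op = tf.OpDefLib._apply_op_helper("MyOp", name: name, args: new { x });
        return _op.output;
    }
}
```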
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
index 336d1d5c..62c03203 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs
@@ -31,8 +31,8 @@ namespace Tensorflow
public int InputListLength(string name)
{
int num = 0;
- num = c_api.TF_OperationInputListLength(_handle, name, tf.status.Handle);
- tf.status.Check(true);
+ num = c_api.TF_OperationInputListLength(_handle, name, tf.Status.Handle);
+ tf.Status.Check(true);
return num;
}
public int NumInputs => c_api.TF_OperationNumInputs(_handle);
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
index 779ed185..0235cafc 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
@@ -28,8 +28,8 @@ namespace Tensorflow
public int OutputListLength(string name)
{
- int num = c_api.TF_OperationOutputListLength(_handle, name, tf.status.Handle);
- tf.status.Check(true);
+ int num = c_api.TF_OperationOutputListLength(_handle, name, tf.Status.Handle);
+ tf.Status.Check(true);
return num;
}
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index d2188e34..37ecdb7f 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -237,8 +237,8 @@ namespace Tensorflow
lock (Locks.ProcessWide)
{
using var buf = new Buffer();
- c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.status.Handle);
- tf.status.Check(true);
+ c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.Status.Handle);
+ tf.Status.Check(true);
x = AttrValue.Parser.ParseFrom(buf.DangerousMemoryBlock.Stream());
}
@@ -297,9 +297,9 @@ namespace Tensorflow
// the updated inputs are reloaded from the c_api
lock (Locks.ProcessWide)
{
- c_api.UpdateEdge(_graph, output, input, tf.status.Handle);
+ c_api.UpdateEdge(_graph, output, input, tf.Status.Handle);
//var updated_inputs = inputs;
- tf.status.Check();
+ tf.Status.Check();
}
}
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index ac101061..cd38d4f8 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -18,6 +18,8 @@ using NumSharp;
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Reflection;
+using Tensorflow.Contexts;
using Tensorflow.Eager;
using Tensorflow.Framework;
using static Tensorflow.Binding;
@@ -459,7 +461,7 @@ namespace Tensorflow
{
name = scope;
- if (!tf.context.executing_eagerly())
+ if (!tf.Context.executing_eagerly())
{
var input_tensor = ops.convert_to_tensor(input);
var input_shape = input_tensor.TensorShape;
@@ -607,9 +609,9 @@ namespace Tensorflow
float padding_value = 0,
string align = "RIGHT_LEFT")
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"MatrixDiagV3", name,
null,
diagonal, k, num_rows, num_cols, padding_value,
@@ -626,9 +628,9 @@ namespace Tensorflow
int k = 0,
string align = "RIGHT_LEFT")
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"MatrixSetDiagV3", name,
null,
input, diagonal, k,
@@ -714,24 +716,24 @@ namespace Tensorflow
{
var size_splits = ops.convert_to_tensor(num_split);
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.context);
+ return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.Context);
}
- var _op = tf._op_def_lib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split });
+ var _op = tf.OpDefLib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split });
return _op.outputs;
}
private static Tensor[] split_eager_fallback<Ta, Tv>(Ta axis, Tv value, int num_split, string name, Context ctx = null)
{
- var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new object[] { value });
+ var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { value });
var axis_tensor = ops.convert_to_tensor(axis, dtype: TF_DataType.TF_INT32);
var _inputs_flat = new List<Tensor> { axis_tensor };
_inputs_flat.AddRange(input);
var _attrs = new object[] { "num_split", num_split, "T", _attr_T };
- return tf._execute.execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name);
+ return tf.Runner.Execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name);
}
public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null)
@@ -780,9 +782,13 @@ namespace Tensorflow
return result;
}
- public static Tensor placeholder(TF_DataType dtype)
+ public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null)
{
- throw new NotImplementedException("array_ops.placeholder");
+ if (tf.Context.executing_eagerly())
+ throw new RuntimeError("tf.placeholder() is not compatible with eager execution.");
+
+ var _op = tf.OpDefLib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape });
+ return _op.output;
}
}
}
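
Note: `array_ops.placeholder` is now a real graph op (and raises `RuntimeError` under eager execution) rather than throwing `NotImplementedException`; the standalone `gen_array_ops.placeholder` removed further down becomes redundant. A usage sketch reusing the `graph_mode()`/`eager_mode()` toggle seen in `InputLayer` (shape and name are hypothetical):

```csharp
using Tensorflow;
using static Tensorflow.Binding;

tf.Context.graph_mode();                      // placeholder is graph-only now
var x = array_ops.placeholder(TF_DataType.TF_FLOAT,
                              shape: new TensorShape(-1, 784),
                              name: "x");
tf.Context.eager_mode();
```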
diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
index 3a34525d..ec3824ed 100644
--- a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
@@ -376,7 +376,7 @@ namespace Tensorflow
{
return tf_with(ops.name_scope(name, "cond", new { pred }), delegate
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
if (pred.ToArray()[0])
return true_fn() as Tensor;
@@ -460,7 +460,7 @@ namespace Tensorflow
{
return tf_with(ops.name_scope(name, "cond", new { pred }), delegate
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
if (pred.ToArray()[0])
return true_fn() as Tensor[];
diff --git a/src/TensorFlowNET.Core/Operations/dataset_ops.cs b/src/TensorFlowNET.Core/Operations/dataset_ops.cs
index 514208e2..f3aca7a7 100644
--- a/src/TensorFlowNET.Core/Operations/dataset_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/dataset_ops.cs
@@ -17,9 +17,9 @@ namespace Tensorflow
///
public Tensor tensor_slice_dataset(Tensor[] components, TensorShape[] output_shapes, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"TensorSliceDataset", name,
null,
new object[]
@@ -35,9 +35,9 @@ namespace Tensorflow
public Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"RepeatDataset", name,
null,
input_dataset, count,
@@ -55,9 +55,9 @@ namespace Tensorflow
bool reshuffle_each_iteration = true,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ShuffleDatasetV3", name,
null,
input_dataset, buffer_size,
@@ -73,9 +73,9 @@ namespace Tensorflow
public Tensor dummy_seed_generator(string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"DummySeedGenerator", name,
null);
return results[0];
@@ -101,9 +101,9 @@ namespace Tensorflow
bool parallel_copy = false,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"BatchDatasetV2", name,
null,
input_dataset, buffer_size, drop_remainder,
@@ -133,9 +133,9 @@ namespace Tensorflow
bool legacy_autotune = true,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"PrefetchDataset", name,
null,
input_dataset, buffer_size,
@@ -162,9 +162,9 @@ namespace Tensorflow
TF_DataType[] output_types, TensorShape[] output_shapes,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"TakeDataset", name,
null,
input_dataset, count,
@@ -194,9 +194,9 @@ namespace Tensorflow
if (optimization_configs == null)
optimization_configs = new string[0];
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"OptimizeDataset", name,
null,
input_dataset, optimizations,
@@ -224,9 +224,9 @@ namespace Tensorflow
AutotuneAlgorithm algorithm, long cpu_budget,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ModelDataset", name,
null,
input_dataset,
@@ -249,9 +249,9 @@ namespace Tensorflow
/// A tuple of `Tensor` objects (handle, deleter).
public (Tensor, Tensor) anonymous_iterator_v2(TF_DataType[] output_types, TensorShape[] output_shapes, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"AnonymousIteratorV2", name,
null,
"output_types", output_types,
@@ -271,9 +271,9 @@ namespace Tensorflow
/// The created Operation.
public ITensorOrOperation make_iterator(Tensor dataset, Tensor iterator, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"MakeIterator", name,
null,
dataset, iterator);
@@ -292,9 +292,9 @@ namespace Tensorflow
/// The created Operation.
public ITensorOrOperation delete_iterator(Tensor handle, Tensor deleter, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"DeleteIterator", name,
null,
handle, deleter);
@@ -314,9 +314,9 @@ namespace Tensorflow
///
public Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"IteratorGetNext", name,
null,
iterator,
diff --git a/src/TensorFlowNET.Core/Operations/functional_ops.cs b/src/TensorFlowNET.Core/Operations/functional_ops.cs
index 5e7a7240..37f1b07e 100644
--- a/src/TensorFlowNET.Core/Operations/functional_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/functional_ops.cs
@@ -60,7 +60,7 @@ namespace Tensorflow
var elems_flat = input_flatten(elems);
- bool in_graph_mode = tf.context.executing_eagerly();
+ bool in_graph_mode = tf.Context.executing_eagerly();
return tf_with(ops.name_scope(name, "scan", new { elems_flat }), scope =>
{
diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
index 28115584..21c48c4a 100644
--- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
@@ -19,6 +19,7 @@ using System.Collections.Generic;
using static Tensorflow.Binding;
using Tensorflow.Eager;
using System.Linq;
+using Tensorflow.Contexts;
namespace Tensorflow
{
@@ -26,14 +27,14 @@ namespace Tensorflow
{
public static Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops });
+ var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops });
return _op.output;
}
public static Tensor check_numerics(Tensor tensor, string message, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message });
+ var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message });
return _op.output;
}
@@ -47,9 +48,9 @@ namespace Tensorflow
///
public static Tensor concat_v2<T, Ta>(T[] values, Ta axis, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ConcatV2", name,
null,
values, axis);
@@ -57,35 +58,35 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
+ var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
return _op.output;
}
public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- return concat_v2_eager_fallback(values, axis, name, tf.context);
+ return concat_v2_eager_fallback(values, axis, name, tf.Context);
}
- var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
+ var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
return _op.output;
}
private static Tensor concat_v2_eager_fallback<T1, T2>(T1[] values, T2 axis, string name, Context ctx)
{
var _attr_N = len(values);
- var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: values.Select(x => (object)x).ToArray());
- var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new object[] { axis });
+ var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: values.Select(x => (object)x).ToArray());
+ var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx };
- return tf._execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf.Runner.Execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
}
public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape });
+ var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape });
return _op.outputs;
}
@@ -123,9 +124,9 @@ namespace Tensorflow
///
public static Tensor diag(Tensor diagonal, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Diag", name,
null,
diagonal);
@@ -133,16 +134,16 @@ namespace Tensorflow
return results[0];
}
- var op = tf._op_def_lib._apply_op_helper("Diag", name: name, args: new { diagonal });
+ var op = tf.OpDefLib._apply_op_helper("Diag", name: name, args: new { diagonal });
return op.output;
}
public static Tensor expand_dims(Tensor input, int axis, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ExpandDims", name,
null,
input, tf.convert_to_tensor(axis));
@@ -150,30 +151,30 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis });
+ var _op = tf.OpDefLib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis });
return _op.outputs[0];
}
public static Tensor gather_v2<T1, T2>(T1 @params, T2 indices, int axis, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis });
+ var _op = tf.OpDefLib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis });
return _op.outputs[0];
}
public static Tensor pad(Tensor input, Tensor paddings, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Pad", name: name, args: new { input, paddings });
+ var _op = tf.OpDefLib._apply_op_helper("Pad", name: name, args: new { input, paddings });
return _op.output;
}
public static Tensor pack(Tensor[] values, int axis = 0, string name = null)
{
- if(tf.context.executing_eagerly())
+ if(tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Pack", name,
null,
values,
@@ -181,23 +182,10 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis });
+ var _op = tf.OpDefLib._apply_op_helper("Pack", name: name, args: new { values, axis });
return _op.output;
}
- public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null)
- {
- var _op = tf._op_def_lib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape });
- var _result = _op.outputs;
- var _inputs_flat = _op.inputs;
-
- var _attrs = new Dictionary<string, object>();
- _attrs["dtype"] = _op.get_attr("dtype");
- _attrs["shape"] = _op.get_attr("shape");
-
- return new Tensor(_op, 0, dtype);
- }
-
///
/// An identity op that triggers an error if a gradient is requested.
///
@@ -226,7 +214,7 @@ namespace Tensorflow
///
public static Tensor prevent_gradient(Tensor input, string message = "", string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("PreventGradient", name: name, args: new { input, message });
+ var op = tf.OpDefLib._apply_op_helper("PreventGradient", name: name, args: new { input, message });
return op.output;
}
@@ -237,9 +225,9 @@ namespace Tensorflow
///
public static Tensor identity(Tensor input, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Identity", name,
null,
input);
@@ -247,30 +235,30 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Identity", name, new { input });
+ var _op = tf.OpDefLib._apply_op_helper("Identity", name, new { input });
return _op.output;
}
public static Tensor invert_permutation(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("InvertPermutation", name, new { x });
+ var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, new { x });
return _op.outputs[0];
}
public static Tensor log(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Log", name: name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Log", name: name, args: new { x });
return _op.outputs[0];
}
public static Tensor rank(Tensor input, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Rank", name,
null,
input);
@@ -278,7 +266,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Rank", name: name, args: new { input });
+ var _op = tf.OpDefLib._apply_op_helper("Rank", name: name, args: new { input });
return _op.outputs[0];
}
@@ -292,9 +280,9 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `value`.
public static Tensor fill<T>(Tensor dims, T value, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Fill", name,
null,
dims, value);
@@ -302,7 +290,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Fill", name, new { dims, value });
+ var _op = tf.OpDefLib._apply_op_helper("Fill", name, new { dims, value });
return _op.output;
}
@@ -315,9 +303,9 @@ namespace Tensorflow
/// A tuple of `Tensor` objects (r0, r1).
public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "")
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"BroadcastGradientArgs", name,
null,
s0,s1);
@@ -325,22 +313,22 @@ namespace Tensorflow
return (results[0], results[1]);
}
- var _op = tf._op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 });
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 });
return (_op.outputs[0], _op.outputs[1]);
}
public static Tensor reverse<T>(Tensor tensor, T axis, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ReverseV2", name, new { tensor, axis });
+ var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, new { tensor, axis });
return _op.output;
}
public static Tensor reshape<T1, T2>(T1 tensor, T2 shape, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Reshape", name,
null,
tensor, shape);
@@ -348,13 +336,13 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
+ var _op = tf.OpDefLib._apply_op_helper("Reshape", name, new { tensor, shape });
return _op.output;
}
public static Tensor reshape(Tensor tensor, int[] shape, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
+ var _op = tf.OpDefLib._apply_op_helper("Reshape", name, new { tensor, shape });
return _op.outputs[0];
}
@@ -367,7 +355,7 @@ namespace Tensorflow
///
public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Unique", name, new { x, out_idx });
+ var _op = tf.OpDefLib._apply_op_helper("Unique", name, new { x, out_idx });
// TODO
//var _result = _UniqueOutput._make(_op.outputs);
return (_op.outputs[0], _op.outputs[1]);
@@ -375,13 +363,13 @@ namespace Tensorflow
public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Unpack", name, new { value, num, axis });
+ var _op = tf.OpDefLib._apply_op_helper("Unpack", name, new { value, num, axis });
return _op.outputs;
}
public static Tensor where(Tensor condition, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Where", name, new { input = condition });
+ var _op = tf.OpDefLib._apply_op_helper("Where", name, new { input = condition });
return _op.output;
}
@@ -392,9 +380,9 @@ namespace Tensorflow
int axis = -1,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"OneHot", name,
null,
indices, depth, on_value, off_value,
@@ -403,7 +391,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis });
+ var _op = tf.OpDefLib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis });
return _op.outputs[0];
}
@@ -416,15 +404,15 @@ namespace Tensorflow
///
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name });
+ var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name });
return _op.outputs[0];
}
public static Tensor select<Tx, Ty>(Tensor condition, Tx t, Ty e, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"SelectV2", name,
null,
condition, t, e);
@@ -432,21 +420,21 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Select", name, new { condition, t, e });
+ var _op = tf.OpDefLib._apply_op_helper("Select", name, new { condition, t, e });
return _op.outputs[0];
}
public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ScatterNd", name, new { indices, updates, shape });
+ var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, new { indices, updates, shape });
return _op.outputs[0];
}
public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Shape", name,
null,
input,
@@ -455,7 +443,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Shape", name, new { input, out_type });
+ var _op = tf.OpDefLib._apply_op_helper("Shape", name, new { input, out_type });
return _op.outputs[0];
}
@@ -468,13 +456,13 @@ namespace Tensorflow
///
public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ShapeN", name, new { input, out_type });
+ var _op = tf.OpDefLib._apply_op_helper("ShapeN", name, new { input, out_type });
return _op.outputs;
}
public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Size", name, new { input, out_type });
+ var _op = tf.OpDefLib._apply_op_helper("Size", name, new { input, out_type });
return _op.outputs[0];
}
@@ -488,15 +476,15 @@ namespace Tensorflow
///
public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
+ var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size });
return _op.outputs[0];
}
public static Tensor tile<T>(Tensor input, T multiples, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Tile", name,
null,
input, multiples);
@@ -504,34 +492,34 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
+ var _op = tf.OpDefLib._apply_op_helper("Tile", name, new { input, multiples });
return _op.outputs[0];
}
public static Tensor transpose<T1, T2>(T1 x, T2 perm, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Transpose", name,
null,
x, perm);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Transpose", name, new { x, perm });
+ var _op = tf.OpDefLib._apply_op_helper("Transpose", name, new { x, perm });
return _op.outputs[0];
}
public static Tensor zeros_like(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ZerosLike", name, new { x });
+ var _op = tf.OpDefLib._apply_op_helper("ZerosLike", name, new { x });
return _op.outputs[0];
}
public static Tensor stop_gradient(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("StopGradient", name, args: new { input = x, name });
+ var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, args: new { input = x, name });
return _op.output;
}
@@ -544,9 +532,9 @@ namespace Tensorflow
int shrink_axis_mask = 0,
string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"StridedSlice", name,
null,
input, begin, end, strides,
@@ -559,7 +547,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new
+ var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new
{
input,
begin,
@@ -583,7 +571,7 @@ namespace Tensorflow
int shrink_axis_mask = 0,
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new
+ var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new
{
input,
begin,
@@ -623,7 +611,7 @@ namespace Tensorflow
int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0,
int shrink_axis_mask = 0, string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("StridedSliceGrad", name: name, args: new
+ var op = tf.OpDefLib._apply_op_helper("StridedSliceGrad", name: name, args: new
{
shape,
begin,
@@ -642,7 +630,7 @@ namespace Tensorflow
        public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
+ var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size });
return _op.outputs[0];
}
@@ -659,9 +647,9 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `input`.
public static Tensor squeeze(Tensor input, int[] axis = null, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Squeeze", name,
null,
input,
@@ -671,7 +659,7 @@ namespace Tensorflow
}
if (axis == null) axis = new int[0];
- var _op = tf._op_def_lib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis });
+ var _op = tf.OpDefLib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis });
return _op.outputs[0];
}
@@ -687,7 +675,7 @@ namespace Tensorflow
/// `Tensor`. Has the same type as `s0`.
public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name });
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name });
return _op.outputs[0];
}
@@ -701,9 +689,9 @@ namespace Tensorflow
///
        public static Tensor broadcast_to<T>(Tensor input, T shape, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"BroadcastTo", name,
null,
input, shape);
@@ -711,7 +699,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name });
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name });
return _op.outputs[0];
}
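
All of the hunks above follow the same dispatch shape after the rename. As a reading aid, here is a minimal sketch of that shape (the `IdentitySketch` wrapper and the "Identity" op are illustrative; `tf.Context`, `tf.Runner`, and `tf.OpDefLib` are assumed to be the members introduced by this change set):

```csharp
using Tensorflow;                // Tensor, Operation
using static Tensorflow.Binding; // the `tf` singleton used throughout the generated ops

public static class DispatchSketch
{
    // Eager mode: go through the fast-path runner on the current device.
    // Graph mode: fall back to the op-def library and return the op's output.
    public static Tensor IdentitySketch(Tensor input, string name = null)
    {
        if (tf.Context.executing_eagerly())
        {
            var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
                "Identity", name,
                null,
                input);
            return results[0];
        }

        var _op = tf.OpDefLib._apply_op_helper("Identity", name, new { input });
        return _op.outputs[0];
    }
}
```
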
diff --git a/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
index a860d9dc..c4c2d441 100644
--- a/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
@@ -23,7 +23,7 @@ namespace Tensorflow
{
public static Operation control_trigger(string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ControlTrigger", name, new
+ var _op = tf.OpDefLib._apply_op_helper("ControlTrigger", name, new
{
});
@@ -41,7 +41,7 @@ namespace Tensorflow
///
public static Tensor enter(Tensor data, string frame_name = "frame_name", bool is_constant = false, int parallel_iterations = 10, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Enter", name, new
+ var _op = tf.OpDefLib._apply_op_helper("Enter", name, new
{
data,
frame_name,
@@ -60,7 +60,7 @@ namespace Tensorflow
///
public static Tensor loop_cond(Tensor input, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LoopCond", name, new { input });
+ var _op = tf.OpDefLib._apply_op_helper("LoopCond", name, new { input });
return _op.output;
}
@@ -73,7 +73,7 @@ namespace Tensorflow
///
public static Tensor ref_next_iteration(Tensor data, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("RefNextIteration", name, new { data });
+ var _op = tf.OpDefLib._apply_op_helper("RefNextIteration", name, new { data });
return _op;
}
@@ -86,7 +86,7 @@ namespace Tensorflow
///
public static Tensor next_iteration(Tensor data, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("NextIteration", name, new { data });
+ var _op = tf.OpDefLib._apply_op_helper("NextIteration", name, new { data });
return _op;
}
@@ -99,7 +99,7 @@ namespace Tensorflow
///
public static Tensor ref_exit(Tensor data, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("RefExit", name, new { data });
+ var _op = tf.OpDefLib._apply_op_helper("RefExit", name, new { data });
return _op;
}
@@ -112,21 +112,21 @@ namespace Tensorflow
///
public static Tensor _exit(Tensor data, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Exit", name, new { data });
+ var _op = tf.OpDefLib._apply_op_helper("Exit", name, new { data });
return _op;
}
public static Operation no_op(string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("NoOp", name, null);
+ var _op = tf.OpDefLib._apply_op_helper("NoOp", name, null);
return _op;
}
public static Tensor[] ref_switch(Tensor data, Tensor pred, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("RefSwitch", name, new { data, pred });
+ var _op = tf.OpDefLib._apply_op_helper("RefSwitch", name, new { data, pred });
return _op.outputs;
}
@@ -150,7 +150,7 @@ namespace Tensorflow
///
public static Tensor[] @switch(Tensor data, Tensor pred, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Switch", name, new { data, pred });
+ var _op = tf.OpDefLib._apply_op_helper("Switch", name, new { data, pred });
var _inputs_flat = _op.inputs;
#pragma warning disable CS0219 // Variable is assigned but its value is never used
var _attrs = ("T", _op.get_attr("T"));
@@ -162,14 +162,14 @@ namespace Tensorflow
public static MergeOutput ref_merge(Tensor[] inputs, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("RefMerge", name, new { inputs });
+ var _op = tf.OpDefLib._apply_op_helper("RefMerge", name, new { inputs });
return new MergeOutput(_op.outputs);
}
public static MergeOutput merge(Tensor[] inputs, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Merge", name, new { inputs });
+ var _op = tf.OpDefLib._apply_op_helper("Merge", name, new { inputs });
return new MergeOutput(_op.outputs);
}
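
For orientation, a short usage sketch of the renamed control-flow wrappers (graph mode assumed; the helper name and the way the Switch branches are recombined are illustrative, not taken from this change):

```csharp
using Tensorflow;

public static class ControlFlowSketch
{
    // Routes `data` down one of two branches with Switch, then rejoins the
    // branches with Merge; MergeOutput wraps whichever branch produced a value.
    public static MergeOutput SwitchAndMerge(Tensor data, Tensor pred)
    {
        Tensor[] branches = gen_control_flow_ops.@switch(data, pred); // [false_branch, true_branch]
        return gen_control_flow_ops.merge(branches);
    }
}
```
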
diff --git a/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
index 69f0ac04..37ecbba8 100644
--- a/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
@@ -22,7 +22,7 @@ namespace Tensorflow
{
public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder")
{
- var op = tf._op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new
+ var op = tf.OpDefLib._apply_op_helper("CTCGreedyDecoder", name: name, args: new
{
inputs,
sequence_length,
diff --git a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
index e96c8a95..beaebce0 100644
--- a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
@@ -22,7 +22,7 @@ namespace Tensorflow
{
public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("DynamicStitch", name, new { indices, data });
+ var _op = tf.OpDefLib._apply_op_helper("DynamicStitch", name, new { indices, data });
return _op.output;
}
@@ -30,7 +30,7 @@ namespace Tensorflow
public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions,
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("DynamicPartition", name, new
+ var _op = tf.OpDefLib._apply_op_helper("DynamicPartition", name, new
{
data,
partitions,
@@ -44,7 +44,7 @@ namespace Tensorflow
TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true,
bool identical_element_shapes = false, string tensor_array_name = "", string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArrayV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArrayV3", name, new
{
size,
dtype,
@@ -61,7 +61,7 @@ namespace Tensorflow
public static Tensor tensor_array_scatter_v3(Tensor handle, Tensor indices, Tensor value,
Tensor flow_in, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArrayScatterV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArrayScatterV3", name, new
{
handle,
indices,
@@ -76,7 +76,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("PaddingFIFOQueueV2", name, new
{
component_types,
shapes,
@@ -92,7 +92,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("FIFOQueueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("FIFOQueueV2", name, new
{
component_types,
shapes,
@@ -108,7 +108,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("PriorityQueueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("PriorityQueueV2", name, new
{
component_types,
shapes,
@@ -124,7 +124,7 @@ namespace Tensorflow
int capacity = -1, int min_after_dequeue = 0, int seed = 0, int seed2 = 0,
string container = "", string shared_name = "", string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("RandomShuffleQueueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("RandomShuffleQueueV2", name, new
{
component_types,
shapes,
@@ -141,7 +141,7 @@ namespace Tensorflow
public static Operation queue_enqueue(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueEnqueue", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueEnqueue", name, new
{
handle,
components,
@@ -153,7 +153,7 @@ namespace Tensorflow
public static Operation queue_enqueue_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueEnqueueV2", name, new
{
handle,
components,
@@ -165,7 +165,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue_v2(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueDequeueV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueDequeueV2", name, new
{
handle,
component_types,
@@ -177,7 +177,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueDequeue", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueDequeue", name, new
{
handle,
component_types,
@@ -189,7 +189,7 @@ namespace Tensorflow
public static Operation queue_enqueue_many_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueManyV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueEnqueueManyV2", name, new
{
handle,
components,
@@ -201,7 +201,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue_many_v2(Tensor handle, int n, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("QueueDequeueManyV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("QueueDequeueManyV2", name, new
{
handle,
n,
@@ -223,7 +223,7 @@ namespace Tensorflow
///
public static Tensor tensor_array_read_v3(Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArrayReadV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArrayReadV3", name, new
{
handle,
index,
@@ -236,7 +236,7 @@ namespace Tensorflow
public static Tensor tensor_array_write_v3(Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArrayWriteV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArrayWriteV3", name, new
{
handle,
index,
@@ -249,7 +249,7 @@ namespace Tensorflow
public static Tensor tensor_array_size_v3(Tensor handle, Tensor flow_in, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArraySizeV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArraySizeV3", name, new
{
handle,
flow_in
@@ -261,7 +261,7 @@ namespace Tensorflow
public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in,
TF_DataType dtype, TensorShape element_shape = null, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("TensorArrayGatherV3", name, new
+ var _op = tf.OpDefLib._apply_op_helper("TensorArrayGatherV3", name, new
{
handle,
indices,
@@ -276,7 +276,7 @@ namespace Tensorflow
public static Tensor stack_v2(Tensor max_size, TF_DataType elem_type, string stack_name = "",
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("StackV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("StackV2", name, new
{
max_size,
elem_type,
@@ -289,7 +289,7 @@ namespace Tensorflow
public static Tensor stack_push_v2(Tensor handle, Tensor elem, bool swap_memory = false,
string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("StackPushV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("StackPushV2", name, new
{
handle,
elem,
@@ -301,7 +301,7 @@ namespace Tensorflow
public static Tensor stack_pop_v2(Tensor handle, TF_DataType elem_type, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("StackPopV2", name, new
+ var _op = tf.OpDefLib._apply_op_helper("StackPopV2", name, new
{
handle,
elem_type
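
A small illustration of the dynamic_partition / dynamic_stitch pair touched above (graph mode; the helper and the caller-supplied index tensors are assumptions, only the two wrapper signatures come from the hunks):

```csharp
using Tensorflow;

public static class DataFlowSketch
{
    // Splits `data` into `num_partitions` pieces according to `partitions`,
    // then stitches the pieces back into a single tensor using the
    // caller-supplied per-piece index tensors.
    public static Tensor PartitionThenStitch(Tensor data, Tensor partitions, int num_partitions,
        Tensor[] piece_indices)
    {
        Tensor[] pieces = gen_data_flow_ops.dynamic_partition(data, partitions, num_partitions);
        return gen_data_flow_ops.dynamic_stitch(piece_indices, pieces);
    }
}
```
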
diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
index 173b8e3a..0775f736 100644
--- a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
@@ -65,13 +65,13 @@ namespace Tensorflow
string name = null)
{
// Add nodes to the TensorFlow graph.
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
throw new NotImplementedException("decode_jpeg");
}
else
{
- var _op = tf._op_def_lib._apply_op_helper("DecodeJpeg", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("DecodeJpeg", name: name, args: new
{
contents,
channels,
@@ -90,13 +90,13 @@ namespace Tensorflow
string name = null)
{
// Add nodes to the TensorFlow graph.
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
throw new NotImplementedException("decode_gif");
}
else
{
- var _op = tf._op_def_lib._apply_op_helper("DecodeGif", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("DecodeGif", name: name, args: new
{
contents
});
@@ -111,13 +111,13 @@ namespace Tensorflow
string name = null)
{
// Add nodes to the TensorFlow graph.
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
throw new NotImplementedException("decode_png");
}
else
{
- var _op = tf._op_def_lib._apply_op_helper("DecodePng", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("DecodePng", name: name, args: new
{
contents,
channels,
@@ -133,13 +133,13 @@ namespace Tensorflow
string name = null)
{
// Add nodes to the TensorFlow graph.
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
throw new NotImplementedException("decode_bmp");
}
else
{
- var _op = tf._op_def_lib._apply_op_helper("DecodeBmp", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("DecodeBmp", name: name, args: new
{
contents,
channels
@@ -151,13 +151,13 @@ namespace Tensorflow
public static Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
throw new NotImplementedException("resize_bilinear");
}
else
{
- var _op = tf._op_def_lib._apply_op_helper("ResizeBilinear", name: name, args: new
+ var _op = tf.OpDefLib._apply_op_helper("ResizeBilinear", name: name, args: new
{
images,
size,
@@ -171,7 +171,7 @@ namespace Tensorflow
        public static Tensor resize_nearest_neighbor<Tsize>(Tensor images, Tsize size, bool align_corners = false,
bool half_pixel_centers = false, string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new
+ var op = tf.OpDefLib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new
{
images,
size,
@@ -185,7 +185,7 @@ namespace Tensorflow
        public static Tensor resize_nearest_neighbor_grad<Tsize>(Tensor grads, Tsize size, bool align_corners = false,
bool half_pixel_centers = false, string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new
+ var op = tf.OpDefLib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new
{
grads,
size,
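
Since the eager branches above still throw NotImplementedException, these wrappers are graph-mode only for now. A hedged usage sketch (the wrapper name decode_jpeg is inferred from the exception message and op name, and the channel count is illustrative; only resize_bilinear's signature is shown verbatim in the hunks):

```csharp
using Tensorflow;

public static class ImageSketch
{
    // Decodes a JPEG byte-string tensor and resizes it; both calls hit the
    // graph-mode path via tf.OpDefLib._apply_op_helper.
    public static Tensor DecodeAndResize(Tensor contents, Tensor size)
    {
        var image = gen_image_ops.decode_jpeg(contents, channels: 3);
        return gen_image_ops.resize_bilinear(image, size, align_corners: false);
    }
}
```
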
diff --git a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
index 8d4353f8..e9a31442 100644
--- a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
@@ -26,7 +26,7 @@ namespace Tensorflow
if (!summarize.HasValue)
summarize = 3;
- var _op = tf._op_def_lib._apply_op_helper("Assert", name, args: new { condition, data, summarize });
+ var _op = tf.OpDefLib._apply_op_helper("Assert", name, args: new { condition, data, summarize });
return _op;
}
@@ -34,7 +34,7 @@ namespace Tensorflow
public static Tensor histogram_summary(string tag, Tensor values, string name = null)
{
            var dict = new Dictionary<string, object>();
- var op = tf._op_def_lib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values });
+ var op = tf.OpDefLib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values });
return op.output;
}
@@ -63,7 +63,7 @@ namespace Tensorflow
            var dict = new Dictionary<string, object>();
dict["tags"] = tags;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("ScalarSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ScalarSummary", name: name, keywords: dict);
return op.output;
}
@@ -94,7 +94,7 @@ namespace Tensorflow
{
            var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
- var op = tf._op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MergeSummary", name: name, keywords: dict);
return op.output;
}
}
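
The summary wrappers above keep the older Dictionary-based keywords calling convention. A minimal sketch of how they compose (the wrapper names scalar_summary and merge_summary are inferred from the op names and bodies shown, so treat them as assumptions):

```csharp
using Tensorflow;

public static class SummarySketch
{
    // Emits one scalar summary per metric and merges them into a single
    // serialized Summary tensor.
    public static Tensor MergedScalars(Tensor loss, Tensor accuracy)
    {
        var loss_summary = gen_logging_ops.scalar_summary("loss", loss);
        var acc_summary = gen_logging_ops.scalar_summary("accuracy", accuracy);
        return gen_logging_ops.merge_summary(new[] { loss_summary, acc_summary });
    }
}
```
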
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index b057cd15..6ec7e261 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -17,6 +17,7 @@
using System;
using System.Linq;
using System.Runtime.InteropServices;
+using Tensorflow.Contexts;
using Tensorflow.Eager;
using static Tensorflow.Binding;
@@ -26,7 +27,7 @@ namespace Tensorflow
{
public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
return _op.outputs[0];
}
@@ -39,16 +40,16 @@ namespace Tensorflow
///
public static Tensor add_n(Tensor[] inputs, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"AddN", name,
null,
new[] { inputs });
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("AddN", name, args: new { inputs });
+ var _op = tf.OpDefLib._apply_op_helper("AddN", name, args: new { inputs });
return _op.outputs[0];
}
@@ -63,9 +64,9 @@ namespace Tensorflow
///
public static Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"ArgMax", name,
null,
input, dimension,
@@ -74,7 +75,7 @@ namespace Tensorflow
return results[0];
}
- return tf._op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).output;
+ return tf.OpDefLib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).output;
}
///
@@ -86,7 +87,7 @@ namespace Tensorflow
///
///
public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null)
- => tf._op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
+ => tf.OpDefLib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
///
/// Computes Psi, the derivative of Lgamma (the log of the absolute value of
@@ -96,7 +97,7 @@ namespace Tensorflow
///
///
public static Tensor digamma(Tensor x, string name = null)
- => tf._op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output;
+ => tf.OpDefLib._apply_op_helper("Digamma", name, args: new { x }).output;
///
/// Returns 0 if the denominator is zero.
@@ -118,7 +119,7 @@ namespace Tensorflow
///
public static Tensor div_no_nan(Tensor x, Tensor y, string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
+ var op = tf.OpDefLib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
return op.output;
}
@@ -137,9 +138,9 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `input`.
        public static Tensor mean<T1, T2>(T1 input, T2 axis, bool keep_dims= false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Mean", name,
null,
input, axis,
@@ -148,40 +149,40 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
return _op.output;
}
public static Tensor mean(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.context);
+ return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.Context);
}
- var _op = tf._op_def_lib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims });
return _op.output;
}
private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{
- var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { inputs });
- var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
+ var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs });
+ var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return tf._execute.execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf.Runner.Execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0];
}
        public static Tensor prod<T1, T2>(T1 input, T2 axis, bool keep_dims = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
try
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Prod", name,
null,
input, axis,
@@ -191,58 +192,58 @@ namespace Tensorflow
}
catch (Exception)
{
- return prod_eager_fallback(input as Tensor, axis as int[], keep_dims, name, tf.context);
+ return prod_eager_fallback(input as Tensor, axis as int[], keep_dims, name, tf.Context);
}
}
- var _op = tf._op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
return _op.output;
}
private static Tensor prod_eager_fallback(Tensor input_t, int[] axis, bool keep_dims, string name, Context ctx = null)
{
- var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { input_t });
- var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
+ var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { input_t });
+ var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return tf._execute.execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf.Runner.Execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0];
}
public static Tensor acos(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Acos", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Acos", name, args: new { x });
return _op.outputs[0];
}
public static Tensor asin(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Asin", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Asin", name, args: new { x });
return _op.outputs[0];
}
public static Tensor add(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Add", name, null,
x, y);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Add", name, args: new { x, y });
return _op.output;
}
        public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Add", name,
null,
x, y);
@@ -250,7 +251,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Add", name, args: new { x, y });
return _op.output;
}
@@ -258,39 +259,39 @@ namespace Tensorflow
        public static Tensor add_v2<Tx, Ty>(Tx x, Ty y, string name = null)
{
// forward_compatible(2019, 6, 25):
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"AddV2", name,
null,
x, y);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("AddV2", name, args: new { x, y });
return _op.output;
}
public static Tensor atan(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Atan", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Atan", name, args: new { x });
return _op.outputs[0];
}
public static Tensor ceil(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Ceil", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Ceil", name, args: new { x });
return _op.outputs[0];
}
public static Tensor sin(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sin", name,
null,
x);
@@ -298,7 +299,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Sin", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Sin", name, args: new { x });
return _op.outputs[0];
}
@@ -319,9 +320,9 @@ namespace Tensorflow
///
public static Tensor sigmoid(Tensor x, string name = "Sigmoid")
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sigmoid", name,
null,
x);
@@ -329,7 +330,7 @@ namespace Tensorflow
return results[0];
}
- var op = tf._op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
+ var op = tf.OpDefLib._apply_op_helper("Sigmoid", name: name, new { x });
return op.output;
}
@@ -353,42 +354,42 @@ namespace Tensorflow
///
public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad")
{
- var op = tf._op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
+ var op = tf.OpDefLib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
return op.outputs[0];
}
        public static Tensor sign<T>(T x, string name = "Sign")
{
- var op = tf._op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
+ var op = tf.OpDefLib._apply_op_helper("Sign", name: name, args: new {x});
return op.outputs[0];
}
public static Tensor sinh(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Sinh", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Sinh", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cos(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Cos", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Cos", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cosh(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Cosh", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Cosh", name, args: new { x });
return _op.outputs[0];
}
        public static Tensor cumsum<T>(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
+ var _op = tf.OpDefLib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
return _op.outputs[0];
}
@@ -403,15 +404,15 @@ namespace Tensorflow
///
public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
+ var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
return _op.outputs[0];
}
public static Tensor tan(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Tan", name,
null,
x);
@@ -419,16 +420,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Tan", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Tan", name, args: new { x });
return _op.outputs[0];
}
public static Tensor tanh(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Tanh", name,
null,
x);
@@ -436,7 +437,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Tanh", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Tanh", name, args: new { x });
return _op.outputs[0];
}
@@ -450,9 +451,9 @@ namespace Tensorflow
///
public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"TanhGrad", name,
null,
y, dy);
@@ -460,29 +461,29 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
+ var _op = tf.OpDefLib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
return _op.outputs[0];
}
public static Tensor floor(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Floor", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Floor", name, args: new { x });
return _op.outputs[0];
}
public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
+ var _op = tf.OpDefLib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
return _op.outputs[0];
}
        public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Greater", name,
null,
x, y);
@@ -490,7 +491,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Greater", name: name, args: new { x, y });
return _op.outputs[0];
}
@@ -508,16 +509,16 @@ namespace Tensorflow
///
public static Tensor lgamma(Tensor x, string name = null)
{
- var op = tf._op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x });
+ var op = tf.OpDefLib._apply_op_helper("Lgamma", name: name, args: new { x });
return op.output;
}
        public static Tensor greater_equal<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"GreaterEqual", name,
null,
x, y);
@@ -525,16 +526,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
return _op.outputs[0];
}
        public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Less", name,
null,
x, y);
@@ -542,16 +543,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Less", name: name, args: new { x, y });
return _op.outputs[0];
}
        public static Tensor less_equal<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"LessEqual", name,
null,
x, y);
@@ -559,35 +560,35 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("LessEqual", name: name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor log1p(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Log1p", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Log1p", name, args: new { x });
return _op.outputs[0];
}
public static Tensor logical_and(Tensor x, Tensor y, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("LogicalAnd", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor logical_not(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LogicalNot", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("LogicalNot", name, args: new { x });
return _op.outputs[0];
}
public static Tensor logical_or(Tensor x, Tensor y, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("LogicalOr", name, args: new { x, y });
return _op.outputs[0];
}
@@ -602,7 +603,7 @@ namespace Tensorflow
public static Tensor squared_difference(Tensor x, Tensor y, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
+ var _op = tf.OpDefLib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
return _op.outputs[0];
}
@@ -615,9 +616,9 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `x`.
public static Tensor square(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Square", name,
null,
x);
@@ -625,7 +626,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Square", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Square", name, args: new { x });
return _op.outputs[0];
}
@@ -638,14 +639,14 @@ namespace Tensorflow
/// A `Tensor` of type `bool`.
public static Tensor is_finite(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("IsFinite", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("IsFinite", name, args: new { x });
return _op.outputs[0];
}
public static Tensor is_nan(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("IsNan", name: name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("IsNan", name: name, args: new { x });
return _op.outputs[0];
}
@@ -658,7 +659,7 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `x`.
public static Tensor exp(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Exp", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Exp", name, args: new { x });
return _op.outputs[0];
}
@@ -671,9 +672,9 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `x`.
public static Tensor log(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Log", name,
null,
x);
@@ -681,16 +682,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Log", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Log", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Cast", name,
null,
x,
@@ -699,16 +700,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
+ var _op = tf.OpDefLib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
return _op.outputs[0];
}
public static Tensor neg(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Neg", name,
null,
x);
@@ -716,16 +717,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Neg", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Neg", name, args: new { x });
return _op.outputs[0];
}
public static Tensor sqrt(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sqrt", name,
null,
x);
@@ -733,32 +734,32 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Sqrt", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Sqrt", name, args: new { x });
return _op.outputs[0];
}
public static Tensor sub(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sub", name,
null,
x, y);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Sub", name, args: new { x, y });
return _op.output;
}
        public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sub", name,
null,
x, y);
@@ -766,7 +767,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Sub", name, args: new { x, y });
return _op.outputs[0];
}
@@ -780,9 +781,9 @@ namespace Tensorflow
///
        public static Tensor equal<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Equal", name,
null,
x, y);
@@ -790,7 +791,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Equal", name, args: new { x, y });
return _op.output;
}
@@ -805,9 +806,9 @@ namespace Tensorflow
///
        public static Tensor not_equal<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"NotEqual", name,
null,
x, y);
@@ -815,16 +816,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("NotEqual", name, args: new { x, y });
return _op.output;
}
public static Tensor atan2(Tensor y, Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Atan2", name,
null,
y, x);
@@ -832,31 +833,31 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Atan2", name, args: new { y, x });
+ var _op = tf.OpDefLib._apply_op_helper("Atan2", name, args: new { y, x });
return _op.output;
}
public static Tensor mul(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Mul", name,
null,
x, y);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Mul", name, args: new { x, y });
return _op.output;
}
        public static Tensor mul<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Mul", name,
null,
x, y);
@@ -864,39 +865,39 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Mul", name, args: new { x, y });
return _op.outputs[0];
}
        public static Tensor mul_no_nan<Tx, Ty>(Tx x, Ty y, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("MulNoNan", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor real_div(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"RealDiv", name,
null,
x, y);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("RealDiv", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor reciprocal(Tensor x, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Reciprocal", name,
null,
x);
@@ -904,16 +905,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Reciprocal", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Reciprocal", name, args: new { x });
return _op.outputs[0];
}
public static Tensor floor_mod(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"FloorMod", name,
null,
x, y);
@@ -921,16 +922,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("FloorMod", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor floor_div(Tensor x, Tensor y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"FloorDiv", name,
null,
x, y);
@@ -938,7 +939,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("FloorDiv", name, args: new { x, y });
return _op.outputs[0];
}
@@ -954,9 +955,9 @@ namespace Tensorflow
///
public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"MatMul", name,
null,
a, b,
@@ -964,7 +965,7 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
+ var _op = tf.OpDefLib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
return _op.output;
}
@@ -996,7 +997,7 @@ namespace Tensorflow
///
public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper(
+ var _op = tf.OpDefLib._apply_op_helper(
"BatchMatMul",
name,
args: new { x, y, adj_x, adj_y });
@@ -1013,9 +1014,9 @@ namespace Tensorflow
///
        public static Tensor maximum<T1, T2>(T1 x, T2 y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Maximum", name,
null,
x, y);
@@ -1023,16 +1024,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Maximum", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Maximum", name, args: new { x, y });
return _op.outputs[0];
}
        public static Tensor minimum<T1, T2>(T1 x, T2 y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Minimum", name,
null,
x, y);
@@ -1040,44 +1041,44 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Minimum", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Minimum", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor _abs(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Abs", name, args: new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Abs", name, args: new { x });
return _op.output;
}
        public static Tensor _any<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
        public static Tensor _max<Tx, Ty>(Tx input, Ty axis, bool keep_dims=false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
        public static Tensor _min<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
        public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Pow", name,
null,
x, y);
@@ -1085,16 +1086,16 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Pow", name, args: new { x, y });
+ var _op = tf.OpDefLib._apply_op_helper("Pow", name, args: new { x, y });
return _op.outputs[0];
}
        public static Tensor _sum<Tx, Ty>(Tx input, Ty axis = default, bool keep_dims = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Sum", name,
null,
input, axis,
@@ -1103,32 +1104,32 @@ namespace Tensorflow
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
public static Tensor _sum(Tensor[] inputs, Tensor axis = default, bool keep_dims = false, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
return _sum_eager_fallback(inputs, axis,
- keep_dims: keep_dims, name: name, ctx: tf.context);
+ keep_dims: keep_dims, name: name, ctx: tf.Context);
}
- var _op = tf._op_def_lib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims });
+ var _op = tf.OpDefLib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{
- var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { inputs });
- var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, tf.int32, new[] { axis });
+ var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs });
+ var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, tf.int32, new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return tf._execute.execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf.Runner.Execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
}
///
@@ -1141,16 +1142,16 @@ namespace Tensorflow
///
public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null)
{
- if (tf.context.executing_eagerly())
+ if (tf.Context.executing_eagerly())
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Range", name,
null,
start, limit, delta);
return results[0];
}
- var _op = tf._op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
+ var _op = tf.OpDefLib._apply_op_helper("Range", name, new { start, limit, delta });
return _op.outputs[0];
}
@@ -1172,7 +1173,7 @@ namespace Tensorflow
///
public static Tensor round(Tensor x, string name = "Round")
{
- var op = tf._op_def_lib._apply_op_helper("Round", name: name, new { x });
+ var op = tf.OpDefLib._apply_op_helper("Round", name: name, new { x });
return op.output;
}
@@ -1185,7 +1186,7 @@ namespace Tensorflow
///
public static Tensor rsqrt(Tensor x, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("Rsqrt", name, new { x });
+ var _op = tf.OpDefLib._apply_op_helper("Rsqrt", name, new { x });
return _op.outputs[0];
}
@@ -1198,7 +1199,7 @@ namespace Tensorflow
/// The fraction of zeros in value, with type float32.
public static Tensor zero_fraction(Tensor value, string name = null)
{
- var _op = tf._op_def_lib._apply_op_helper("zero_fraction", name, new { value, name });
+ var _op = tf.OpDefLib._apply_op_helper("zero_fraction", name, new { value, name });
return _op.outputs[0];
}
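
The gen_math_ops.cs hunks above all converge on one shape: probe `tf.Context.executing_eagerly()`, take the `tf.Runner.TFE_FastPathExecute` fast path in eager mode, and otherwise build a graph node through `tf.OpDefLib._apply_op_helper`. A condensed sketch of that post-refactor shape, taken from the Pow wrapper above (the class name and the `using` directive are assumptions added only for self-containment):

```csharp
using static Tensorflow.Binding;

namespace Tensorflow
{
    internal static class GenMathOpsSketch
    {
        public static Tensor pow(Tensor x, Tensor y, string name = null)
        {
            if (tf.Context.executing_eagerly())
            {
                // Eager mode: dispatch the kernel directly on the current device.
                var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
                    "Pow", name,
                    null,
                    x, y);
                return results[0];
            }

            // Graph mode: register the node through the op definition library.
            var _op = tf.OpDefLib._apply_op_helper("Pow", name, args: new { x, y });
            return _op.outputs[0];
        }
    }
}
```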
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
index 1991c4b1..dc86c1e5 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
@@ -11,7 +11,7 @@ namespace Tensorflow
{
public static Tensor mul(IntPtr x, IntPtr y, string name = null)
{
- var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
"Mul", name,
null,
x, y);
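
The eager fallback helpers follow the same rename: `tf._execute.args_to_matching_eager` becomes `tf.Runner.ArgsToMatchingEager` and `tf._execute.execute` becomes `tf.Runner.Execute`, with the call shape unchanged. A sketch mirroring `_sum_eager_fallback` above (the class name, `using` directives, and the namespace of `Context` are assumptions):

```csharp
using static Tensorflow.Binding;
using Tensorflow.Eager;   // assumed home of Context in this version

namespace Tensorflow
{
    internal static class EagerFallbackSketch
    {
        public static Tensor sum(Tensor[] inputs, Tensor axis, bool keep_dims = false,
            string name = null, Context ctx = null)
        {
            // Coerce the inputs to a common dtype and build the flat input list.
            var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs });
            var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, tf.int32, new[] { axis });
            var _inputs_flat = input.concat(axis1);

            // Attributes travel as a flat name/value array.
            var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };

            // Execute the kernel eagerly; "Sum" has exactly one output.
            return tf.Runner.Execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
        }
    }
}
```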
diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs
index 386537b7..98e61957 100644
--- a/src/TensorFlowNET.Core/Operations/gen_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs
@@ -33,7 +33,7 @@ namespace Tensorflow.Operations
dict["error_msg"] = error_msg;
if (exit_without_error.HasValue)
dict["exit_without_error"] = exit_without_error.Value;
- var op = tf._op_def_lib._apply_op_helper("Abort", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Abort", name: name, keywords: dict);
return op;
}
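
Everything below in gen_ops.cs is the same mechanical change applied to the dictionary-based wrappers: inputs and optional attributes are collected into a `Dictionary<string, object>` and handed to `_apply_op_helper` via `keywords:`. A minimal sketch around the Abort hunk above (the method signature and the null checks outside the hunk are assumptions):

```csharp
using System.Collections.Generic;
using static Tensorflow.Binding;

namespace Tensorflow.Operations
{
    internal static class GenOpsSketch
    {
        public static Operation abort(string error_msg = null, bool? exit_without_error = null,
            string name = "Abort")
        {
            var dict = new Dictionary<string, object>();
            if (error_msg != null)
                dict["error_msg"] = error_msg;                           // optional string attr
            if (exit_without_error.HasValue)
                dict["exit_without_error"] = exit_without_error.Value;   // optional bool attr

            // Build the node through the op definition library and return the Operation.
            var op = tf.OpDefLib._apply_op_helper("Abort", name: name, keywords: dict);
            return op;
        }
    }
}
```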
@@ -57,7 +57,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Abs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Abs", name: name, keywords: dict);
return op.output;
}
@@ -92,7 +92,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("AccumulateNV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AccumulateNV2", name: name, keywords: dict);
return op.output;
}
@@ -123,7 +123,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
dict["local_step"] = local_step;
dict["gradient"] = gradient;
- var op = tf._op_def_lib._apply_op_helper("AccumulatorApplyGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AccumulatorApplyGradient", name: name, keywords: dict);
return op;
}
@@ -144,7 +144,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("AccumulatorNumAccumulated", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AccumulatorNumAccumulated", name: name, keywords: dict);
return op.output;
}
@@ -172,7 +172,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
dict["new_global_step"] = new_global_step;
- var op = tf._op_def_lib._apply_op_helper("AccumulatorSetGlobalStep", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AccumulatorSetGlobalStep", name: name, keywords: dict);
return op;
}
@@ -210,7 +210,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
dict["num_required"] = num_required;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("AccumulatorTakeGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AccumulatorTakeGradient", name: name, keywords: dict);
return op.output;
}
@@ -229,7 +229,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Acos", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Acos", name: name, keywords: dict);
return op.output;
}
@@ -248,7 +248,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Acosh", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Acosh", name: name, keywords: dict);
return op.output;
}
@@ -274,7 +274,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Add", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Add", name: name, keywords: dict);
return op.output;
}
@@ -343,7 +343,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("AddManySparseToTensorsMap", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AddManySparseToTensorsMap", name: name, keywords: dict);
return op.output;
}
@@ -363,7 +363,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
- var op = tf._op_def_lib._apply_op_helper("AddN", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AddN", name: name, keywords: dict);
return op.output;
}
@@ -420,7 +420,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("AddSparseToTensorsMap", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AddSparseToTensorsMap", name: name, keywords: dict);
return op.output;
}
@@ -446,7 +446,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("AddV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AddV2", name: name, keywords: dict);
return op.output;
}
@@ -474,7 +474,7 @@ namespace Tensorflow.Operations
dict["contrast_factor"] = contrast_factor;
dict["min_value"] = min_value;
dict["max_value"] = max_value;
- var op = tf._op_def_lib._apply_op_helper("AdjustContrast", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AdjustContrast", name: name, keywords: dict);
return op.output;
}
@@ -510,7 +510,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["images"] = images;
dict["contrast_factor"] = contrast_factor;
- var op = tf._op_def_lib._apply_op_helper("AdjustContrastv2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AdjustContrastv2", name: name, keywords: dict);
return op.output;
}
@@ -543,7 +543,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["images"] = images;
dict["delta"] = delta;
- var op = tf._op_def_lib._apply_op_helper("AdjustHue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AdjustHue", name: name, keywords: dict);
return op.output;
}
@@ -576,7 +576,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["images"] = images;
dict["scale"] = scale;
- var op = tf._op_def_lib._apply_op_helper("AdjustSaturation", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AdjustSaturation", name: name, keywords: dict);
return op.output;
}
@@ -613,7 +613,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("All", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("All", name: name, keywords: dict);
return op.output;
}
@@ -684,7 +684,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("AllCandidateSampler", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AllCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -730,7 +730,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = tf._op_def_lib._apply_op_helper("Angle", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Angle", name: name, keywords: dict);
return op.output;
}
@@ -758,7 +758,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("AnonymousIterator", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AnonymousIterator", name: name, keywords: dict);
return op.output;
}
@@ -795,7 +795,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("Any", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Any", name: name, keywords: dict);
return op.output;
}
@@ -860,7 +860,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAdaMax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAdaMax", name: name, keywords: dict);
return op.output;
}
@@ -917,7 +917,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAdadelta", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAdadelta", name: name, keywords: dict);
return op.output;
}
@@ -965,7 +965,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (update_slots.HasValue)
dict["update_slots"] = update_slots.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAdagrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAdagrad", name: name, keywords: dict);
return op.output;
}
@@ -1020,7 +1020,7 @@ namespace Tensorflow.Operations
dict["global_step"] = global_step;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAdagradDA", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAdagradDA", name: name, keywords: dict);
return op.output;
}
@@ -1095,7 +1095,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (use_nesterov.HasValue)
dict["use_nesterov"] = use_nesterov.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAdam", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAdam", name: name, keywords: dict);
return op.output;
}
@@ -1152,7 +1152,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyAddSign", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyAddSign", name: name, keywords: dict);
return op.output;
}
@@ -1231,7 +1231,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyCenteredRMSProp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyCenteredRMSProp", name: name, keywords: dict);
return op.output;
}
@@ -1294,7 +1294,7 @@ namespace Tensorflow.Operations
dict["lr_power"] = lr_power;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyFtrl", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyFtrl", name: name, keywords: dict);
return op.output;
}
@@ -1362,7 +1362,7 @@ namespace Tensorflow.Operations
dict["lr_power"] = lr_power;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyFtrlV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyFtrlV2", name: name, keywords: dict);
return op.output;
}
@@ -1397,7 +1397,7 @@ namespace Tensorflow.Operations
dict["delta"] = delta;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyGradientDescent", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyGradientDescent", name: name, keywords: dict);
return op.output;
}
@@ -1454,7 +1454,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (use_nesterov.HasValue)
dict["use_nesterov"] = use_nesterov.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyMomentum", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyMomentum", name: name, keywords: dict);
return op.output;
}
@@ -1511,7 +1511,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyPowerSign", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyPowerSign", name: name, keywords: dict);
return op.output;
}
@@ -1563,7 +1563,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyProximalAdagrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyProximalAdagrad", name: name, keywords: dict);
return op.output;
}
@@ -1610,7 +1610,7 @@ namespace Tensorflow.Operations
dict["delta"] = delta;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyProximalGradientDescent", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyProximalGradientDescent", name: name, keywords: dict);
return op.output;
}
@@ -1677,7 +1677,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("ApplyRMSProp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApplyRMSProp", name: name, keywords: dict);
return op.output;
}
@@ -1703,7 +1703,7 @@ namespace Tensorflow.Operations
dict["y"] = y;
if (tolerance.HasValue)
dict["tolerance"] = tolerance.Value;
- var op = tf._op_def_lib._apply_op_helper("ApproximateEqual", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ApproximateEqual", name: name, keywords: dict);
return op.output;
}
@@ -1735,7 +1735,7 @@ namespace Tensorflow.Operations
dict["dimension"] = dimension;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = tf._op_def_lib._apply_op_helper("ArgMax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ArgMax", name: name, keywords: dict);
return op.output;
}
@@ -1767,7 +1767,7 @@ namespace Tensorflow.Operations
dict["dimension"] = dimension;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = tf._op_def_lib._apply_op_helper("ArgMin", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ArgMin", name: name, keywords: dict);
return op.output;
}
@@ -1819,7 +1819,7 @@ namespace Tensorflow.Operations
dict["width"] = width.Value;
if (fill != null)
dict["fill"] = fill;
- var op = tf._op_def_lib._apply_op_helper("AsString", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AsString", name: name, keywords: dict);
return op.output;
}
@@ -1838,7 +1838,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Asin", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Asin", name: name, keywords: dict);
return op.output;
}
@@ -1857,7 +1857,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Asinh", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Asinh", name: name, keywords: dict);
return op.output;
}
@@ -1890,7 +1890,7 @@ namespace Tensorflow.Operations
dict["data"] = data;
if (summarize.HasValue)
dict["summarize"] = summarize.Value;
- var op = tf._op_def_lib._apply_op_helper("Assert", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Assert", name: name, keywords: dict);
return op;
}
@@ -1933,7 +1933,7 @@ namespace Tensorflow.Operations
dict["validate_shape"] = validate_shape.Value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("Assign", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Assign", name: name, keywords: dict);
return op.output;
}
@@ -1969,7 +1969,7 @@ namespace Tensorflow.Operations
dict["value"] = value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("AssignAdd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AssignAdd", name: name, keywords: dict);
return op.output;
}
@@ -1997,7 +1997,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("AssignAddVariableOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name: name, keywords: dict);
return op;
}
@@ -2033,7 +2033,7 @@ namespace Tensorflow.Operations
dict["value"] = value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = tf._op_def_lib._apply_op_helper("AssignSub", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AssignSub", name: name, keywords: dict);
return op.output;
}
@@ -2061,7 +2061,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("AssignSubVariableOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AssignSubVariableOp", name: name, keywords: dict);
return op;
}
@@ -2089,7 +2089,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("AssignVariableOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name: name, keywords: dict);
return op;
}
@@ -2108,7 +2108,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Atan", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Atan", name: name, keywords: dict);
return op.output;
}
@@ -2137,7 +2137,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["y"] = y;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Atan2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Atan2", name: name, keywords: dict);
return op.output;
}
@@ -2156,7 +2156,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Atanh", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Atanh", name: name, keywords: dict);
return op.output;
}
@@ -2221,7 +2221,7 @@ namespace Tensorflow.Operations
dict["stride"] = stride;
if (magnitude_squared.HasValue)
dict["magnitude_squared"] = magnitude_squared.Value;
- var op = tf._op_def_lib._apply_op_helper("AudioSpectrogram", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AudioSpectrogram", name: name, keywords: dict);
return op.output;
}
@@ -2269,7 +2269,7 @@ namespace Tensorflow.Operations
dict["sample_rate"] = sample_rate;
if (max_outputs.HasValue)
dict["max_outputs"] = max_outputs.Value;
- var op = tf._op_def_lib._apply_op_helper("AudioSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AudioSummary", name: name, keywords: dict);
return op.output;
}
@@ -2316,7 +2316,7 @@ namespace Tensorflow.Operations
dict["sample_rate"] = sample_rate;
if (max_outputs.HasValue)
dict["max_outputs"] = max_outputs.Value;
- var op = tf._op_def_lib._apply_op_helper("AudioSummaryV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AudioSummaryV2", name: name, keywords: dict);
return op.output;
}
@@ -2365,7 +2365,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("AvgPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AvgPool", name: name, keywords: dict);
return op.output;
}
@@ -2412,7 +2412,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("AvgPool3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AvgPool3D", name: name, keywords: dict);
return op.output;
}
@@ -2463,7 +2463,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("AvgPool3DGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AvgPool3DGrad", name: name, keywords: dict);
return op.output;
}
@@ -2513,7 +2513,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("AvgPoolGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("AvgPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -2570,7 +2570,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("Barrier", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Barrier", name: name, keywords: dict);
return op.output;
}
@@ -2605,7 +2605,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = tf._op_def_lib._apply_op_helper("BarrierClose", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BarrierClose", name: name, keywords: dict);
return op;
}
@@ -2627,7 +2627,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("BarrierIncompleteSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BarrierIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -2667,7 +2667,7 @@ namespace Tensorflow.Operations
dict["keys"] = keys;
dict["values"] = values;
dict["component_index"] = component_index;
- var op = tf._op_def_lib._apply_op_helper("BarrierInsertMany", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BarrierInsertMany", name: name, keywords: dict);
return op;
}
@@ -2689,7 +2689,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("BarrierReadySize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BarrierReadySize", name: name, keywords: dict);
return op.output;
}
@@ -2752,7 +2752,7 @@ namespace Tensorflow.Operations
dict["wait_for_incomplete"] = wait_for_incomplete.Value;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("BarrierTakeMany", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BarrierTakeMany", name: name, keywords: dict);
int _idx = 0;
var indices = op.outputs[_idx++];
var keys = op.outputs[_idx++];
@@ -2853,7 +2853,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (batching_queue != null)
dict["batching_queue"] = batching_queue;
- var op = tf._op_def_lib._apply_op_helper("Batch", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Batch", name: name, keywords: dict);
int _idx = 0;
var batched_tensors = Enumerable.Range(0, op.OutputListLength("batched_tensors")).Select(_ => op.outputs[_idx++]).ToArray();
var batch_index = op.outputs[_idx++];
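
Ops with multiple (and list-valued) outputs unpack them positionally, as in the Batch hunk just above: list outputs are sliced out with `op.OutputListLength`, the remaining outputs are taken one by one. A sketch of that unpacking step only (the helper name and signature are hypothetical):

```csharp
using System.Linq;

namespace Tensorflow.Operations
{
    internal static class MultiOutputSketch
    {
        public static (Tensor[] batched_tensors, Tensor batch_index) unpack_batch_outputs(Operation op)
        {
            int _idx = 0;
            // "batched_tensors" is a list output; its length is only known from the built op.
            var batched_tensors = Enumerable.Range(0, op.OutputListLength("batched_tensors"))
                                            .Select(_ => op.outputs[_idx++])
                                            .ToArray();
            // Remaining outputs follow positionally after the list.
            var batch_index = op.outputs[_idx++];
            return (batched_tensors, batch_index);
        }
    }
}
```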
@@ -2889,7 +2889,7 @@ namespace Tensorflow.Operations
dict["batch_size"] = batch_size;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("BatchDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -2925,7 +2925,7 @@ namespace Tensorflow.Operations
dict["drop_remainder"] = drop_remainder;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("BatchDatasetV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchDatasetV2", name: name, keywords: dict);
return op.output;
}
@@ -2980,7 +2980,7 @@ namespace Tensorflow.Operations
dict["adj_x"] = adj_x.Value;
if (adj_y.HasValue)
dict["adj_y"] = adj_y.Value;
- var op = tf._op_def_lib._apply_op_helper("BatchMatMul", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchMatMul", name: name, keywords: dict);
return op.output;
}
@@ -3037,7 +3037,7 @@ namespace Tensorflow.Operations
dict["gamma"] = gamma;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = tf._op_def_lib._apply_op_helper("BatchNormWithGlobalNormalization", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalization", name: name, keywords: dict);
return op.output;
}
@@ -3099,7 +3099,7 @@ namespace Tensorflow.Operations
dict["backprop"] = backprop;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = tf._op_def_lib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name: name, keywords: dict);
int _idx = 0;
var dx = op.outputs[_idx++];
var dm = op.outputs[_idx++];
@@ -3216,7 +3216,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["crops"] = crops;
dict["block_size"] = block_size;
- var op = tf._op_def_lib._apply_op_helper("BatchToSpace", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchToSpace", name: name, keywords: dict);
return op.output;
}
@@ -3361,7 +3361,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["block_shape"] = block_shape;
dict["crops"] = crops;
- var op = tf._op_def_lib._apply_op_helper("BatchToSpaceND", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, keywords: dict);
return op.output;
}
@@ -3386,7 +3386,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("BesselI0e", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BesselI0e", name: name, keywords: dict);
return op.output;
}
@@ -3411,7 +3411,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("BesselI1e", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BesselI1e", name: name, keywords: dict);
return op.output;
}
@@ -3451,7 +3451,7 @@ namespace Tensorflow.Operations
dict["a"] = a;
dict["b"] = b;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Betainc", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Betainc", name: name, keywords: dict);
return op.output;
}
@@ -3491,7 +3491,7 @@ namespace Tensorflow.Operations
dict["bias"] = bias;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("BiasAdd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BiasAdd", name: name, keywords: dict);
return op.output;
}
@@ -3528,7 +3528,7 @@ namespace Tensorflow.Operations
dict["out_backprop"] = out_backprop;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("BiasAddGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BiasAddGrad", name: name, keywords: dict);
return op.output;
}
@@ -3559,7 +3559,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["value"] = value;
dict["bias"] = bias;
- var op = tf._op_def_lib._apply_op_helper("BiasAddV1", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BiasAddV1", name: name, keywords: dict);
return op.output;
}
@@ -3600,7 +3600,7 @@ namespace Tensorflow.Operations
dict["arr"] = arr;
dict["size"] = size;
dict["weights"] = weights;
- var op = tf._op_def_lib._apply_op_helper("Bincount", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Bincount", name: name, keywords: dict);
return op.output;
}
@@ -3637,7 +3637,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["type"] = type;
- var op = tf._op_def_lib._apply_op_helper("Bitcast", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Bitcast", name: name, keywords: dict);
return op.output;
}
@@ -3663,7 +3663,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("BitwiseAnd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BitwiseAnd", name: name, keywords: dict);
return op.output;
}
@@ -3689,7 +3689,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("BitwiseOr", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BitwiseOr", name: name, keywords: dict);
return op.output;
}
@@ -3715,7 +3715,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("BitwiseXor", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BitwiseXor", name: name, keywords: dict);
return op.output;
}
@@ -3776,7 +3776,7 @@ namespace Tensorflow.Operations
dict["tree_complexity"] = tree_complexity;
dict["min_node_weight"] = min_node_weight;
dict["max_splits"] = max_splits;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesCalculateBestGainsPerFeature", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesCalculateBestGainsPerFeature", name: name, keywords: dict);
int _idx = 0;
var node_ids_list = Enumerable.Range(0, op.OutputListLength("node_ids_list")).Select(_ => op.outputs[_idx++]).ToArray();
var gains_list = Enumerable.Range(0, op.OutputListLength("gains_list")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -3819,7 +3819,7 @@ namespace Tensorflow.Operations
dict["mean_hessians"] = mean_hessians;
dict["l1"] = l1;
dict["l2"] = l2;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesCenterBias", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesCenterBias", name: name, keywords: dict);
return op.output;
}
@@ -3847,7 +3847,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["stamp_token"] = stamp_token;
dict["tree_ensemble_serialized"] = tree_ensemble_serialized;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesCreateEnsemble", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesCreateEnsemble", name: name, keywords: dict);
return op;
}
@@ -3878,7 +3878,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["stamp_token"] = stamp_token;
dict["tree_ensemble_serialized"] = tree_ensemble_serialized;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesDeserializeEnsemble", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesDeserializeEnsemble", name: name, keywords: dict);
return op;
}
@@ -3902,7 +3902,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesEnsembleResourceHandleOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesEnsembleResourceHandleOp", name: name, keywords: dict);
return op.output;
}
@@ -3938,7 +3938,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesExampleDebugOutputs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesExampleDebugOutputs", name: name, keywords: dict);
return op.output;
}
@@ -3965,7 +3965,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesGetEnsembleStates", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesGetEnsembleStates", name: name, keywords: dict);
int _idx = 0;
var stamp_token = op.outputs[_idx++];
var num_trees = op.outputs[_idx++];
@@ -4017,7 +4017,7 @@ namespace Tensorflow.Operations
dict["bucketized_features_list"] = bucketized_features_list;
dict["max_splits"] = max_splits;
dict["num_buckets"] = num_buckets;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesMakeStatsSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesMakeStatsSummary", name: name, keywords: dict);
return op.output;
}
@@ -4052,7 +4052,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesPredict", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesPredict", name: name, keywords: dict);
return op.output;
}
@@ -4075,7 +4075,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesSerializeEnsemble", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesSerializeEnsemble", name: name, keywords: dict);
int _idx = 0;
var stamp_token = op.outputs[_idx++];
var tree_ensemble_serialized = op.outputs[_idx++];
@@ -4128,7 +4128,7 @@ namespace Tensorflow.Operations
dict["cached_node_ids"] = cached_node_ids;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesTrainingPredict", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesTrainingPredict", name: name, keywords: dict);
int _idx = 0;
var partial_logits = op.outputs[_idx++];
var tree_ids = op.outputs[_idx++];
@@ -4200,7 +4200,7 @@ namespace Tensorflow.Operations
dict["max_depth"] = max_depth;
dict["learning_rate"] = learning_rate;
dict["pruning_mode"] = pruning_mode;
- var op = tf._op_def_lib._apply_op_helper("BoostedTreesUpdateEnsemble", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BoostedTreesUpdateEnsemble", name: name, keywords: dict);
return op;
}
@@ -4226,7 +4226,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["s0"] = s0;
dict["s1"] = s1;
- var op = tf._op_def_lib._apply_op_helper("BroadcastArgs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name: name, keywords: dict);
return op.output;
}
@@ -4254,7 +4254,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["s0"] = s0;
dict["s1"] = s1;
- var op = tf._op_def_lib._apply_op_helper("BroadcastGradientArgs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name: name, keywords: dict);
int _idx = 0;
var r0 = op.outputs[_idx++];
var r1 = op.outputs[_idx++];
@@ -4301,7 +4301,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("BroadcastTo", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BroadcastTo", name: name, keywords: dict);
return op.output;
}
@@ -4343,7 +4343,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["boundaries"] = boundaries;
- var op = tf._op_def_lib._apply_op_helper("Bucketize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Bucketize", name: name, keywords: dict);
return op.output;
}
@@ -4373,7 +4373,7 @@ namespace Tensorflow.Operations
dict["tag"] = tag;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("BytesProducedStatsDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("BytesProducedStatsDataset", name: name, keywords: dict);
return op.output;
}
@@ -4431,7 +4431,7 @@ namespace Tensorflow.Operations
dict["top_paths"] = top_paths;
if (merge_repeated.HasValue)
dict["merge_repeated"] = merge_repeated.Value;
- var op = tf._op_def_lib._apply_op_helper("CTCBeamSearchDecoder", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CTCBeamSearchDecoder", name: name, keywords: dict);
int _idx = 0;
var decoded_indices = Enumerable.Range(0, op.OutputListLength("decoded_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var decoded_values = Enumerable.Range(0, op.OutputListLength("decoded_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -4485,7 +4485,7 @@ namespace Tensorflow.Operations
dict["sequence_length"] = sequence_length;
if (merge_repeated.HasValue)
dict["merge_repeated"] = merge_repeated.Value;
- var op = tf._op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CTCGreedyDecoder", name: name, keywords: dict);
int _idx = 0;
var decoded_indices = op.outputs[_idx++];
var decoded_values = op.outputs[_idx++];
@@ -4552,7 +4552,7 @@ namespace Tensorflow.Operations
dict["ctc_merge_repeated"] = ctc_merge_repeated.Value;
if (ignore_longer_outputs_than_inputs.HasValue)
dict["ignore_longer_outputs_than_inputs"] = ignore_longer_outputs_than_inputs.Value;
- var op = tf._op_def_lib._apply_op_helper("CTCLoss", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CTCLoss", name: name, keywords: dict);
int _idx = 0;
var loss = op.outputs[_idx++];
var gradient = op.outputs[_idx++];
@@ -4593,7 +4593,7 @@ namespace Tensorflow.Operations
dict["filename"] = filename;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("CacheDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CacheDataset", name: name, keywords: dict);
return op.output;
}
@@ -4620,7 +4620,7 @@ namespace Tensorflow.Operations
dict["DstT"] = DstT;
if (Truncate.HasValue)
dict["Truncate"] = Truncate.Value;
- var op = tf._op_def_lib._apply_op_helper("Cast", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cast", name: name, keywords: dict);
return op.output;
}
@@ -4639,7 +4639,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Ceil", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Ceil", name: name, keywords: dict);
return op.output;
}
@@ -4667,7 +4667,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["tensor"] = tensor;
dict["message"] = message;
- var op = tf._op_def_lib._apply_op_helper("CheckNumerics", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, keywords: dict);
return op.output;
}
@@ -4703,7 +4703,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("Cholesky", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cholesky", name: name, keywords: dict);
return op.output;
}
@@ -4736,7 +4736,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["l"] = l;
dict["grad"] = grad;
- var op = tf._op_def_lib._apply_op_helper("CholeskyGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CholeskyGrad", name: name, keywords: dict);
return op.output;
}
@@ -4773,7 +4773,7 @@ namespace Tensorflow.Operations
dict["t"] = t;
dict["clip_value_min"] = clip_value_min;
dict["clip_value_max"] = clip_value_max;
- var op = tf._op_def_lib._apply_op_helper("ClipByValue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ClipByValue", name: name, keywords: dict);
return op.output;
}
@@ -4809,7 +4809,7 @@ namespace Tensorflow.Operations
dict["group_key"] = group_key;
dict["instance_key"] = instance_key;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("CollectiveBcastRecv", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CollectiveBcastRecv", name: name, keywords: dict);
return op.output;
}
@@ -4844,7 +4844,7 @@ namespace Tensorflow.Operations
dict["group_key"] = group_key;
dict["instance_key"] = instance_key;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("CollectiveBcastSend", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CollectiveBcastSend", name: name, keywords: dict);
return op.output;
}
@@ -4887,7 +4887,7 @@ namespace Tensorflow.Operations
dict["merge_op"] = merge_op;
dict["final_op"] = final_op;
dict["subdiv_offsets"] = subdiv_offsets;
- var op = tf._op_def_lib._apply_op_helper("CollectiveReduce", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CollectiveReduce", name: name, keywords: dict);
return op.output;
}
@@ -4937,7 +4937,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["threshold"] = threshold;
- var op = tf._op_def_lib._apply_op_helper("CompareAndBitpack", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CompareAndBitpack", name: name, keywords: dict);
return op.output;
}
@@ -4979,7 +4979,7 @@ namespace Tensorflow.Operations
dict["imag"] = imag;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = tf._op_def_lib._apply_op_helper("Complex", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Complex", name: name, keywords: dict);
return op.output;
}
@@ -5008,7 +5008,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = tf._op_def_lib._apply_op_helper("ComplexAbs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ComplexAbs", name: name, keywords: dict);
return op.output;
}
@@ -5061,7 +5061,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("ComputeAccidentalHits", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ComputeAccidentalHits", name: name, keywords: dict);
int _idx = 0;
var indices = op.outputs[_idx++];
var ids = op.outputs[_idx++];
@@ -5094,7 +5094,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["concat_dim"] = concat_dim;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("Concat", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Concat", name: name, keywords: dict);
return op.output;
}
@@ -5132,7 +5132,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["concat_dim"] = concat_dim;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("ConcatOffset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, keywords: dict);
int _idx = 0;
var offset = Enumerable.Range(0, op.OutputListLength("offset")).Select(_ => op.outputs[_idx++]).ToArray();
return (offset);
@@ -5163,7 +5163,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["values"] = values;
dict["axis"] = axis;
- var op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, keywords: dict);
return op.output;
}
@@ -5193,7 +5193,7 @@ namespace Tensorflow.Operations
dict["another_dataset"] = another_dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("ConcatenateDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConcatenateDataset", name: name, keywords: dict);
return op.output;
}
@@ -5240,7 +5240,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("ConditionalAccumulator", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConditionalAccumulator", name: name, keywords: dict);
return op.output;
}
@@ -5277,7 +5277,7 @@ namespace Tensorflow.Operations
dict["tpu_embedding_config"] = tpu_embedding_config;
if (is_global_init.HasValue)
dict["is_global_init"] = is_global_init.Value;
- var op = tf._op_def_lib._apply_op_helper("ConfigureDistributedTPU", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConfigureDistributedTPU", name: name, keywords: dict);
return op.output;
}
@@ -5311,7 +5311,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("Conj", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conj", name: name, keywords: dict);
return op.output;
}
@@ -5338,7 +5338,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["perm"] = perm;
- var op = tf._op_def_lib._apply_op_helper("ConjugateTranspose", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConjugateTranspose", name: name, keywords: dict);
return op.output;
}
@@ -5363,7 +5363,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["value"] = value;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("Const", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Const", name: name, keywords: dict);
return op.output;
}
@@ -5392,7 +5392,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["mutex_lock"] = mutex_lock;
- var op = tf._op_def_lib._apply_op_helper("ConsumeMutexLock", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ConsumeMutexLock", name: name, keywords: dict);
return op;
}
@@ -5411,7 +5411,7 @@ namespace Tensorflow.Operations
public static Operation control_trigger (string name = "ControlTrigger")
{
var dict = new Dictionary<string, object>();
- var op = tf._op_def_lib._apply_op_helper("ControlTrigger", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ControlTrigger", name: name, keywords: dict);
return op;
}
@@ -5496,7 +5496,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv2D", name: name, keywords: dict);
return op.output;
}
@@ -5564,7 +5564,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv2DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -5631,7 +5631,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv2DBackpropInput", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -5692,7 +5692,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv3D", name: name, keywords: dict);
return op.output;
}
@@ -5737,7 +5737,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropFilter", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -5798,7 +5798,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropFilterV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilterV2", name: name, keywords: dict);
return op.output;
}
@@ -5843,7 +5843,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropInput", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -5904,7 +5904,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropInputV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInputV2", name: name, keywords: dict);
return op.output;
}
@@ -5949,7 +5949,7 @@ namespace Tensorflow.Operations
dict["tensor_name"] = tensor_name;
if (debug_ops_spec != null)
dict["debug_ops_spec"] = debug_ops_spec;
- var op = tf._op_def_lib._apply_op_helper("Copy", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Copy", name: name, keywords: dict);
return op.output;
}
@@ -5992,7 +5992,7 @@ namespace Tensorflow.Operations
dict["tensor_name"] = tensor_name;
if (debug_ops_spec != null)
dict["debug_ops_spec"] = debug_ops_spec;
- var op = tf._op_def_lib._apply_op_helper("CopyHost", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CopyHost", name: name, keywords: dict);
return op.output;
}
@@ -6011,7 +6011,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Cos", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cos", name: name, keywords: dict);
return op.output;
}
@@ -6030,7 +6030,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Cosh", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cosh", name: name, keywords: dict);
return op.output;
}
@@ -6058,7 +6058,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["ref"] = referecne;
dict["limit"] = limit;
- var op = tf._op_def_lib._apply_op_helper("CountUpTo", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CountUpTo", name: name, keywords: dict);
return op.output;
}
@@ -6134,7 +6134,7 @@ namespace Tensorflow.Operations
dict["method"] = method;
if (extrapolation_value.HasValue)
dict["extrapolation_value"] = extrapolation_value.Value;
- var op = tf._op_def_lib._apply_op_helper("CropAndResize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CropAndResize", name: name, keywords: dict);
return op.output;
}
@@ -6184,7 +6184,7 @@ namespace Tensorflow.Operations
dict["box_ind"] = box_ind;
if (method != null)
dict["method"] = method;
- var op = tf._op_def_lib._apply_op_helper("CropAndResizeGradBoxes", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CropAndResizeGradBoxes", name: name, keywords: dict);
return op.output;
}
@@ -6239,7 +6239,7 @@ namespace Tensorflow.Operations
dict["T"] = T;
if (method != null)
dict["method"] = method;
- var op = tf._op_def_lib._apply_op_helper("CropAndResizeGradImage", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CropAndResizeGradImage", name: name, keywords: dict);
return op.output;
}
@@ -6269,7 +6269,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["b"] = b;
- var op = tf._op_def_lib._apply_op_helper("Cross", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cross", name: name, keywords: dict);
return op.output;
}
@@ -6306,7 +6306,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["group_assignment"] = group_assignment;
- var op = tf._op_def_lib._apply_op_helper("CrossReplicaSum", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CrossReplicaSum", name: name, keywords: dict);
return op.output;
}
@@ -6399,7 +6399,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNN", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNN", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_h = op.outputs[_idx++];
@@ -6523,7 +6523,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNBackprop", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNBackprop", name: name, keywords: dict);
int _idx = 0;
var input_backprop = op.outputs[_idx++];
var input_h_backprop = op.outputs[_idx++];
@@ -6653,7 +6653,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNBackpropV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNBackpropV2", name: name, keywords: dict);
int _idx = 0;
var input_backprop = op.outputs[_idx++];
var input_h_backprop = op.outputs[_idx++];
@@ -6744,7 +6744,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNCanonicalToParams", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNCanonicalToParams", name: name, keywords: dict);
return op.output;
}
@@ -6824,7 +6824,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNParamsSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNParamsSize", name: name, keywords: dict);
return op.output;
}
@@ -6914,7 +6914,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNParamsToCanonical", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNParamsToCanonical", name: name, keywords: dict);
int _idx = 0;
var weights = Enumerable.Range(0, op.OutputListLength("weights")).Select(_ => op.outputs[_idx++]).ToArray();
var biases = Enumerable.Range(0, op.OutputListLength("biases")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -7014,7 +7014,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("CudnnRNNV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("CudnnRNNV2", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_h = op.outputs[_idx++];
@@ -7087,7 +7087,7 @@ namespace Tensorflow.Operations
dict["exclusive"] = exclusive.Value;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = tf._op_def_lib._apply_op_helper("Cumprod", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cumprod", name: name, keywords: dict);
return op.output;
}
@@ -7154,7 +7154,7 @@ namespace Tensorflow.Operations
dict["exclusive"] = exclusive.Value;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = tf._op_def_lib._apply_op_helper("Cumsum", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Cumsum", name: name, keywords: dict);
return op.output;
}
@@ -7189,7 +7189,7 @@ namespace Tensorflow.Operations
dict["src_format"] = src_format;
if (dst_format != null)
dict["dst_format"] = dst_format;
- var op = tf._op_def_lib._apply_op_helper("DataFormatDimMap", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DataFormatDimMap", name: name, keywords: dict);
return op.output;
}
@@ -7223,7 +7223,7 @@ namespace Tensorflow.Operations
dict["src_format"] = src_format;
if (dst_format != null)
dict["dst_format"] = dst_format;
- var op = tf._op_def_lib._apply_op_helper("DataFormatVecPermute", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DataFormatVecPermute", name: name, keywords: dict);
return op.output;
}
@@ -7247,7 +7247,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input_dataset"] = input_dataset;
- var op = tf._op_def_lib._apply_op_helper("DatasetToGraph", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DatasetToGraph", name: name, keywords: dict);
return op.output;
}
@@ -7276,7 +7276,7 @@ namespace Tensorflow.Operations
dict["dataset"] = dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("DatasetToSingleElement", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DatasetToSingleElement", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -7307,7 +7307,7 @@ namespace Tensorflow.Operations
dict["input_dataset"] = input_dataset;
dict["filename"] = filename;
dict["compression_type"] = compression_type;
- var op = tf._op_def_lib._apply_op_helper("DatasetToTFRecord", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DatasetToTFRecord", name: name, keywords: dict);
return op;
}
@@ -7331,7 +7331,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("DebugGradientIdentity", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DebugGradientIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7355,7 +7355,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("DebugGradientRefIdentity", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DebugGradientRefIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7404,7 +7404,7 @@ namespace Tensorflow.Operations
dict["debug_urls"] = debug_urls;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = tf._op_def_lib._apply_op_helper("DebugIdentity", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DebugIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7453,7 +7453,7 @@ namespace Tensorflow.Operations
dict["debug_urls"] = debug_urls;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = tf._op_def_lib._apply_op_helper("DebugNanCount", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DebugNanCount", name: name, keywords: dict);
return op.output;
}
@@ -7547,7 +7547,7 @@ namespace Tensorflow.Operations
dict["mute_if_healthy"] = mute_if_healthy.Value;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = tf._op_def_lib._apply_op_helper("DebugNumericSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DebugNumericSummary", name: name, keywords: dict);
return op.output;
}
@@ -7630,7 +7630,7 @@ namespace Tensorflow.Operations
dict["acceptable_fraction"] = acceptable_fraction.Value;
if (dct_method != null)
dict["dct_method"] = dct_method;
- var op = tf._op_def_lib._apply_op_helper("DecodeAndCropJpeg", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeAndCropJpeg", name: name, keywords: dict);
return op.output;
}
@@ -7655,7 +7655,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("DecodeBase64", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeBase64", name: name, keywords: dict);
return op.output;
}
@@ -7690,7 +7690,7 @@ namespace Tensorflow.Operations
dict["contents"] = contents;
if (channels.HasValue)
dict["channels"] = channels.Value;
- var op = tf._op_def_lib._apply_op_helper("DecodeBmp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeBmp", name: name, keywords: dict);
return op.output;
}
@@ -7744,7 +7744,7 @@ namespace Tensorflow.Operations
dict["na_value"] = na_value;
if (select_cols != null)
dict["select_cols"] = select_cols;
- var op = tf._op_def_lib._apply_op_helper("DecodeCSV", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeCSV", name: name, keywords: dict);
int _idx = 0;
var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray();
return (output);
@@ -7782,7 +7782,7 @@ namespace Tensorflow.Operations
dict["bytes"] = bytes;
if (compression_type != null)
dict["compression_type"] = compression_type;
- var op = tf._op_def_lib._apply_op_helper("DecodeCompressed", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeCompressed", name: name, keywords: dict);
return op.output;
}
@@ -7812,7 +7812,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["contents"] = contents;
- var op = tf._op_def_lib._apply_op_helper("DecodeGif", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeGif", name: name, keywords: dict);
return op.output;
}
@@ -7843,7 +7843,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["json_examples"] = json_examples;
- var op = tf._op_def_lib._apply_op_helper("DecodeJSONExample", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeJSONExample", name: name, keywords: dict);
return op.output;
}
@@ -7922,7 +7922,7 @@ namespace Tensorflow.Operations
dict["acceptable_fraction"] = acceptable_fraction.Value;
if (dct_method != null)
dict["dct_method"] = dct_method;
- var op = tf._op_def_lib._apply_op_helper("DecodeJpeg", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeJpeg", name: name, keywords: dict);
return op.output;
}
@@ -7969,7 +7969,7 @@ namespace Tensorflow.Operations
dict["channels"] = channels.Value;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = tf._op_def_lib._apply_op_helper("DecodePng", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodePng", name: name, keywords: dict);
return op.output;
}
@@ -8078,7 +8078,7 @@ namespace Tensorflow.Operations
dict["message_format"] = message_format;
if (sanitize.HasValue)
dict["sanitize"] = sanitize.Value;
- var op = tf._op_def_lib._apply_op_helper("DecodeProtoV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeProtoV2", name: name, keywords: dict);
int _idx = 0;
var sizes = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -8115,7 +8115,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type;
if (little_endian.HasValue)
dict["little_endian"] = little_endian.Value;
- var op = tf._op_def_lib._apply_op_helper("DecodeRaw", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeRaw", name: name, keywords: dict);
return op.output;
}
@@ -8164,7 +8164,7 @@ namespace Tensorflow.Operations
dict["desired_channels"] = desired_channels.Value;
if (desired_samples.HasValue)
dict["desired_samples"] = desired_samples.Value;
- var op = tf._op_def_lib._apply_op_helper("DecodeWav", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DecodeWav", name: name, keywords: dict);
int _idx = 0;
var audio = op.outputs[_idx++];
var sample_rate = op.outputs[_idx++];
@@ -8189,7 +8189,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("DeepCopy", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DeepCopy", name: name, keywords: dict);
return op.output;
}
@@ -8209,7 +8209,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("DeleteSessionTensor", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DeleteSessionTensor", name: name, keywords: dict);
return op;
}
@@ -8258,7 +8258,7 @@ namespace Tensorflow.Operations
dict["set_operation"] = set_operation;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = tf._op_def_lib._apply_op_helper("DenseToDenseSetOperation", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DenseToDenseSetOperation", name: name, keywords: dict);
int _idx = 0;
var result_indices = op.outputs[_idx++];
var result_values = op.outputs[_idx++];
@@ -8301,7 +8301,7 @@ namespace Tensorflow.Operations
dict["row_shape"] = row_shape;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("DenseToSparseBatchDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DenseToSparseBatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -8369,7 +8369,7 @@ namespace Tensorflow.Operations
dict["set_operation"] = set_operation;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = tf._op_def_lib._apply_op_helper("DenseToSparseSetOperation", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DenseToSparseSetOperation", name: name, keywords: dict);
int _idx = 0;
var result_indices = op.outputs[_idx++];
var result_values = op.outputs[_idx++];
@@ -8492,7 +8492,7 @@ namespace Tensorflow.Operations
dict["block_size"] = block_size;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("DepthToSpace", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DepthToSpace", name: name, keywords: dict);
return op.output;
}
@@ -8563,7 +8563,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNative", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNative", name: name, keywords: dict);
return op.output;
}
@@ -8630,7 +8630,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -8697,7 +8697,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -8803,7 +8803,7 @@ namespace Tensorflow.Operations
dict["max_range"] = max_range;
if (mode != null)
dict["mode"] = mode;
- var op = tf._op_def_lib._apply_op_helper("Dequantize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Dequantize", name: name, keywords: dict);
return op.output;
}
@@ -8828,7 +8828,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource_handle"] = resource_handle;
dict["serialized"] = serialized;
- var op = tf._op_def_lib._apply_op_helper("DeserializeIterator", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DeserializeIterator", name: name, keywords: dict);
return op;
}
@@ -8901,7 +8901,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["serialized_sparse"] = serialized_sparse;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("DeserializeManySparse", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DeserializeManySparse", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = op.outputs[_idx++];
var sparse_values = op.outputs[_idx++];
@@ -8978,7 +8978,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["serialized_sparse"] = serialized_sparse;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("DeserializeSparse", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DeserializeSparse", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = op.outputs[_idx++];
var sparse_values = op.outputs[_idx++];
@@ -9012,7 +9012,7 @@ namespace Tensorflow.Operations
dict["resource"] = resource;
if (ignore_lookup_error.HasValue)
dict["ignore_lookup_error"] = ignore_lookup_error.Value;
- var op = tf._op_def_lib._apply_op_helper("DestroyResourceOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DestroyResourceOp", name: name, keywords: dict);
return op;
}
@@ -9047,7 +9047,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["ref"] = referecne;
dict["var_name"] = var_name;
- var op = tf._op_def_lib._apply_op_helper("DestroyTemporaryVariable", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DestroyTemporaryVariable", name: name, keywords: dict);
return op.output;
}
@@ -9086,7 +9086,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["diagonal"] = diagonal;
- var op = tf._op_def_lib._apply_op_helper("Diag", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Diag", name: name, keywords: dict);
return op.output;
}
@@ -9127,7 +9127,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("DiagPart", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DiagPart", name: name, keywords: dict);
return op.output;
}
@@ -9149,7 +9149,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Digamma", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Digamma", name: name, keywords: dict);
return op.output;
}
@@ -9216,7 +9216,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("Dilation2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Dilation2D", name: name, keywords: dict);
return op.output;
}
@@ -9262,7 +9262,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("Dilation2DBackpropFilter", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -9308,7 +9308,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("Dilation2DBackpropInput", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -9334,7 +9334,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Div", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Div", name: name, keywords: dict);
return op.output;
}
@@ -9361,7 +9361,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("DivNoNan", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DivNoNan", name: name, keywords: dict);
return op.output;
}
@@ -9401,7 +9401,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["images"] = images;
dict["boxes"] = boxes;
- var op = tf._op_def_lib._apply_op_helper("DrawBoundingBoxes", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DrawBoundingBoxes", name: name, keywords: dict);
return op.output;
}
@@ -9468,7 +9468,7 @@ namespace Tensorflow.Operations
dict["data"] = data;
dict["partitions"] = partitions;
dict["num_partitions"] = num_partitions;
- var op = tf._op_def_lib._apply_op_helper("DynamicPartition", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DynamicPartition", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -9556,7 +9556,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["indices"] = indices;
dict["data"] = data;
- var op = tf._op_def_lib._apply_op_helper("DynamicStitch", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("DynamicStitch", name: name, keywords: dict);
return op.output;
}
@@ -9646,7 +9646,7 @@ namespace Tensorflow.Operations
dict["truth_shape"] = truth_shape;
if (normalize.HasValue)
dict["normalize"] = normalize.Value;
- var op = tf._op_def_lib._apply_op_helper("EditDistance", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EditDistance", name: name, keywords: dict);
return op.output;
}
@@ -9669,7 +9669,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["features"] = features;
- var op = tf._op_def_lib._apply_op_helper("Elu", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Elu", name: name, keywords: dict);
return op.output;
}
@@ -9695,7 +9695,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["gradients"] = gradients;
dict["outputs"] = outputs;
- var op = tf._op_def_lib._apply_op_helper("EluGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EluGrad", name: name, keywords: dict);
return op.output;
}
@@ -9727,7 +9727,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
if (init.HasValue)
dict["init"] = init.Value;
- var op = tf._op_def_lib._apply_op_helper("Empty", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Empty", name: name, keywords: dict);
return op.output;
}
@@ -9758,7 +9758,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["element_shape"] = element_shape;
dict["element_dtype"] = element_dtype;
- var op = tf._op_def_lib._apply_op_helper("EmptyTensorList", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EmptyTensorList", name: name, keywords: dict);
return op.output;
}
@@ -9792,7 +9792,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (pad.HasValue)
dict["pad"] = pad.Value;
- var op = tf._op_def_lib._apply_op_helper("EncodeBase64", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EncodeBase64", name: name, keywords: dict);
return op.output;
}
@@ -9877,7 +9877,7 @@ namespace Tensorflow.Operations
dict["y_density"] = y_density.Value;
if (xmp_metadata != null)
dict["xmp_metadata"] = xmp_metadata;
- var op = tf._op_def_lib._apply_op_helper("EncodeJpeg", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EncodeJpeg", name: name, keywords: dict);
return op.output;
}
@@ -9916,7 +9916,7 @@ namespace Tensorflow.Operations
dict["image"] = image;
if (compression.HasValue)
dict["compression"] = compression.Value;
- var op = tf._op_def_lib._apply_op_helper("EncodePng", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EncodePng", name: name, keywords: dict);
return op.output;
}
@@ -9994,7 +9994,7 @@ namespace Tensorflow.Operations
dict["message_type"] = message_type;
if (descriptor_source != null)
dict["descriptor_source"] = descriptor_source;
- var op = tf._op_def_lib._apply_op_helper("EncodeProto", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EncodeProto", name: name, keywords: dict);
return op.output;
}
@@ -10028,7 +10028,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["audio"] = audio;
dict["sample_rate"] = sample_rate;
- var op = tf._op_def_lib._apply_op_helper("EncodeWav", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EncodeWav", name: name, keywords: dict);
return op.output;
}
@@ -10058,7 +10058,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("EnsureShape", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("EnsureShape", name: name, keywords: dict);
return op.output;
}
@@ -10101,7 +10101,7 @@ namespace Tensorflow.Operations
dict["is_constant"] = is_constant.Value;
if (parallel_iterations.HasValue)
dict["parallel_iterations"] = parallel_iterations.Value;
- var op = tf._op_def_lib._apply_op_helper("Enter", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Enter", name: name, keywords: dict);
return op.output;
}
@@ -10127,7 +10127,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Equal", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Equal", name: name, keywords: dict);
return op.output;
}
@@ -10146,7 +10146,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Erf", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Erf", name: name, keywords: dict);
return op.output;
}
@@ -10165,7 +10165,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Erfc", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Erfc", name: name, keywords: dict);
return op.output;
}
@@ -10189,7 +10189,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["data"] = data;
- var op = tf._op_def_lib._apply_op_helper("Exit", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Exit", name: name, keywords: dict);
return op.output;
}
@@ -10208,7 +10208,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Exp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Exp", name: name, keywords: dict);
return op.output;
}
@@ -10267,7 +10267,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["dim"] = dim;
- var op = tf._op_def_lib._apply_op_helper("ExpandDims", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ExpandDims", name: name, keywords: dict);
return op.output;
}
@@ -10289,7 +10289,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Expm1", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Expm1", name: name, keywords: dict);
return op.output;
}
@@ -10364,7 +10364,7 @@ namespace Tensorflow.Operations
dict["normalized"] = normalized.Value;
if (uniform_noise.HasValue)
dict["uniform_noise"] = uniform_noise.Value;
- var op = tf._op_def_lib._apply_op_helper("ExtractGlimpse", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ExtractGlimpse", name: name, keywords: dict);
return op.output;
}
@@ -10422,7 +10422,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("ExtractImagePatches", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ExtractImagePatches", name: name, keywords: dict);
return op.output;
}
@@ -10452,7 +10452,7 @@ namespace Tensorflow.Operations
dict["contents"] = contents;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = tf._op_def_lib._apply_op_helper("ExtractJpegShape", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ExtractJpegShape", name: name, keywords: dict);
return op.output;
}
@@ -10482,7 +10482,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("FFT", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FFT", name: name, keywords: dict);
return op.output;
}
@@ -10512,7 +10512,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("FFT2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FFT2D", name: name, keywords: dict);
return op.output;
}
@@ -10542,7 +10542,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("FFT3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FFT3D", name: name, keywords: dict);
return op.output;
}
@@ -10590,7 +10590,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("FIFOQueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FIFOQueue", name: name, keywords: dict);
return op.output;
}
@@ -10638,7 +10638,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("FIFOQueueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FIFOQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -10669,7 +10669,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("FakeParam", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeParam", name: name, keywords: dict);
return op.output;
}
@@ -10713,7 +10713,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgs", name: name, keywords: dict);
return op.output;
}
@@ -10755,7 +10755,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name: name, keywords: dict);
return op.output;
}
@@ -10800,7 +10800,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVars", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVars", name: name, keywords: dict);
return op.output;
}
@@ -10848,7 +10848,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name: name, keywords: dict);
int _idx = 0;
var backprops_wrt_input = op.outputs[_idx++];
var backprop_wrt_min = op.outputs[_idx++];
@@ -10898,7 +10898,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name: name, keywords: dict);
return op.output;
}
@@ -10949,7 +10949,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name: name, keywords: dict);
int _idx = 0;
var backprops_wrt_input = op.outputs[_idx++];
var backprop_wrt_min = op.outputs[_idx++];
@@ -10972,7 +10972,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
- var op = tf._op_def_lib._apply_op_helper("FakeQueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FakeQueue", name: name, keywords: dict);
return op.output;
}
@@ -11021,7 +11021,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dims"] = dims;
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("Fill", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Fill", name: name, keywords: dict);
return op.output;
}
@@ -11048,7 +11048,7 @@ namespace Tensorflow.Operations
dict["input_dataset"] = input_dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("FilterByLastComponentDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FilterByLastComponentDataset", name: name, keywords: dict);
return op.output;
}
@@ -11087,7 +11087,7 @@ namespace Tensorflow.Operations
dict["record_bytes"] = record_bytes;
dict["footer_bytes"] = footer_bytes;
dict["buffer_size"] = buffer_size;
- var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FixedLengthRecordDataset", name: name, keywords: dict);
return op.output;
}
@@ -11137,7 +11137,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordReader", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name: name, keywords: dict);
return op.output;
}
@@ -11193,7 +11193,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (encoding != null)
dict["encoding"] = encoding;
- var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordReaderV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name: name, keywords: dict);
return op.output;
}
@@ -11321,7 +11321,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("FixedUnigramCandidateSampler", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FixedUnigramCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -11344,7 +11344,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Floor", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Floor", name: name, keywords: dict);
return op.output;
}
@@ -11370,7 +11370,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("FloorDiv", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FloorDiv", name: name, keywords: dict);
return op.output;
}
@@ -11399,7 +11399,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("FloorMod", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FloorMod", name: name, keywords: dict);
return op.output;
}
@@ -11479,7 +11479,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("FractionalAvgPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FractionalAvgPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var row_pooling_sequence = op.outputs[_idx++];
@@ -11539,7 +11539,7 @@ namespace Tensorflow.Operations
dict["col_pooling_sequence"] = col_pooling_sequence;
if (overlapping.HasValue)
dict["overlapping"] = overlapping.Value;
- var op = tf._op_def_lib._apply_op_helper("FractionalAvgPoolGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FractionalAvgPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -11643,7 +11643,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("FractionalMaxPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FractionalMaxPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var row_pooling_sequence = op.outputs[_idx++];
@@ -11700,7 +11700,7 @@ namespace Tensorflow.Operations
dict["col_pooling_sequence"] = col_pooling_sequence;
if (overlapping.HasValue)
dict["overlapping"] = overlapping.Value;
- var op = tf._op_def_lib._apply_op_helper("FractionalMaxPoolGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FractionalMaxPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -11768,7 +11768,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNorm", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var batch_mean = op.outputs[_idx++];
@@ -11845,7 +11845,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: name, keywords: dict);
int _idx = 0;
var x_backprop = op.outputs[_idx++];
var scale_backprop = op.outputs[_idx++];
@@ -11922,7 +11922,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGradV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV2", name: name, keywords: dict);
int _idx = 0;
var x_backprop = op.outputs[_idx++];
var scale_backprop = op.outputs[_idx++];
@@ -11996,7 +11996,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = tf._op_def_lib._apply_op_helper("FusedBatchNormV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedBatchNormV2", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var batch_mean = op.outputs[_idx++];
@@ -12060,7 +12060,7 @@ namespace Tensorflow.Operations
dict["mode"] = mode;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("FusedPadConv2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedPadConv2D", name: name, keywords: dict);
return op.output;
}
@@ -12128,7 +12128,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (resize_align_corners.HasValue)
dict["resize_align_corners"] = resize_align_corners.Value;
- var op = tf._op_def_lib._apply_op_helper("FusedResizeAndPadConv2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("FusedResizeAndPadConv2D", name: name, keywords: dict);
return op.output;
}
@@ -12181,7 +12181,7 @@ namespace Tensorflow.Operations
dict["indices"] = indices;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = tf._op_def_lib._apply_op_helper("Gather", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Gather", name: name, keywords: dict);
return op.output;
}
@@ -12313,7 +12313,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["params"] = parameters;
dict["indices"] = indices;
- var op = tf._op_def_lib._apply_op_helper("GatherNd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GatherNd", name: name, keywords: dict);
return op.output;
}
@@ -12374,7 +12374,7 @@ namespace Tensorflow.Operations
dict["params"] = parameters;
dict["indices"] = indices;
dict["axis"] = axis;
- var op = tf._op_def_lib._apply_op_helper("GatherV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GatherV2", name: name, keywords: dict);
return op.output;
}
@@ -12449,7 +12449,7 @@ namespace Tensorflow.Operations
dict["num_new_vocab"] = num_new_vocab;
if (old_vocab_size.HasValue)
dict["old_vocab_size"] = old_vocab_size.Value;
- var op = tf._op_def_lib._apply_op_helper("GenerateVocabRemapping", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GenerateVocabRemapping", name: name, keywords: dict);
int _idx = 0;
var remapping = op.outputs[_idx++];
var num_present = op.outputs[_idx++];
@@ -12474,7 +12474,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("GetSessionHandle", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GetSessionHandle", name: name, keywords: dict);
return op.output;
}
@@ -12496,7 +12496,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["value"] = value;
- var op = tf._op_def_lib._apply_op_helper("GetSessionHandleV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GetSessionHandleV2", name: name, keywords: dict);
return op.output;
}
@@ -12522,7 +12522,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("GetSessionTensor", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GetSessionTensor", name: name, keywords: dict);
return op.output;
}
@@ -12548,7 +12548,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Greater", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Greater", name: name, keywords: dict);
return op.output;
}
@@ -12574,7 +12574,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("GreaterEqual", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GreaterEqual", name: name, keywords: dict);
return op.output;
}
@@ -12601,7 +12601,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("GuaranteeConst", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("GuaranteeConst", name: name, keywords: dict);
return op.output;
}
@@ -12629,7 +12629,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["images"] = images;
- var op = tf._op_def_lib._apply_op_helper("HSVToRGB", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HSVToRGB", name: name, keywords: dict);
return op.output;
}
@@ -12679,7 +12679,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = tf._op_def_lib._apply_op_helper("HashTable", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HashTable", name: name, keywords: dict);
return op.output;
}
@@ -12729,7 +12729,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = tf._op_def_lib._apply_op_helper("HashTableV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -12781,7 +12781,7 @@ namespace Tensorflow.Operations
dict["nbins"] = nbins;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = tf._op_def_lib._apply_op_helper("HistogramFixedWidth", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HistogramFixedWidth", name: name, keywords: dict);
return op.output;
}
@@ -12813,7 +12813,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["tag"] = tag;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("HistogramSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HistogramSummary", name: name, keywords: dict);
return op.output;
}
@@ -12838,7 +12838,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["value"] = value;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("HostConst", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("HostConst", name: name, keywords: dict);
return op.output;
}
@@ -12868,7 +12868,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("IFFT", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IFFT", name: name, keywords: dict);
return op.output;
}
@@ -12898,7 +12898,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("IFFT2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -12928,7 +12928,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("IFFT3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -12974,7 +12974,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("IRFFT", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IRFFT", name: name, keywords: dict);
return op.output;
}
@@ -13021,7 +13021,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("IRFFT2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IRFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -13068,7 +13068,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("IRFFT3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IRFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -13087,7 +13087,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("Identity", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Identity", name: name, keywords: dict);
return op.output;
}
@@ -13123,7 +13123,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("IdentityN", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IdentityN", name: name, keywords: dict);
int _idx = 0;
var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray();
return (output);
@@ -13158,7 +13158,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("IdentityReader", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IdentityReader", name: name, keywords: dict);
return op.output;
}
@@ -13191,7 +13191,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("IdentityReaderV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name: name, keywords: dict);
return op.output;
}
@@ -13228,7 +13228,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Igamma", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Igamma", name: name, keywords: dict);
return op.output;
}
@@ -13250,7 +13250,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("IgammaGradA", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IgammaGradA", name: name, keywords: dict);
return op.output;
}
@@ -13286,7 +13286,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Igammac", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Igammac", name: name, keywords: dict);
return op.output;
}
@@ -13322,7 +13322,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = tf._op_def_lib._apply_op_helper("Imag", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Imag", name: name, keywords: dict);
return op.output;
}
@@ -13393,7 +13393,7 @@ namespace Tensorflow.Operations
dict["max_images"] = max_images.Value;
if (bad_color != null)
dict["bad_color"] = bad_color;
- var op = tf._op_def_lib._apply_op_helper("ImageSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ImageSummary", name: name, keywords: dict);
return op.output;
}
@@ -13428,7 +13428,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
dict["shape"] = shape;
dict["memory_region_name"] = memory_region_name;
- var op = tf._op_def_lib._apply_op_helper("ImmutableConst", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ImmutableConst", name: name, keywords: dict);
return op.output;
}
@@ -13474,7 +13474,7 @@ namespace Tensorflow.Operations
dict["predictions"] = predictions;
dict["targets"] = targets;
dict["k"] = k;
- var op = tf._op_def_lib._apply_op_helper("InTopK", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InTopK", name: name, keywords: dict);
return op.output;
}
@@ -13519,7 +13519,7 @@ namespace Tensorflow.Operations
dict["predictions"] = predictions;
dict["targets"] = targets;
dict["k"] = k;
- var op = tf._op_def_lib._apply_op_helper("InTopKV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InTopKV2", name: name, keywords: dict);
return op.output;
}
@@ -13546,7 +13546,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("InfeedDequeue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InfeedDequeue", name: name, keywords: dict);
return op.output;
}
@@ -13576,7 +13576,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dtypes"] = dtypes;
dict["shapes"] = shapes;
- var op = tf._op_def_lib._apply_op_helper("InfeedDequeueTuple", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InfeedDequeueTuple", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -13610,7 +13610,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = tf._op_def_lib._apply_op_helper("InfeedEnqueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InfeedEnqueue", name: name, keywords: dict);
return op;
}
@@ -13642,7 +13642,7 @@ namespace Tensorflow.Operations
dict["shapes"] = shapes;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = tf._op_def_lib._apply_op_helper("InfeedEnqueueTuple", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InfeedEnqueueTuple", name: name, keywords: dict);
return op;
}
@@ -13670,7 +13670,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("InitializeTable", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InitializeTable", name: name, keywords: dict);
return op;
}
@@ -13727,7 +13727,7 @@ namespace Tensorflow.Operations
dict["vocab_size"] = vocab_size.Value;
if (delimiter != null)
dict["delimiter"] = delimiter;
- var op = tf._op_def_lib._apply_op_helper("InitializeTableFromTextFile", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InitializeTableFromTextFile", name: name, keywords: dict);
return op;
}
@@ -13784,7 +13784,7 @@ namespace Tensorflow.Operations
dict["vocab_size"] = vocab_size.Value;
if (delimiter != null)
dict["delimiter"] = delimiter;
- var op = tf._op_def_lib._apply_op_helper("InitializeTableFromTextFileV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InitializeTableFromTextFileV2", name: name, keywords: dict);
return op;
}
@@ -13812,7 +13812,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("InitializeTableV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InitializeTableV2", name: name, keywords: dict);
return op;
}
@@ -13843,7 +13843,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = tf._op_def_lib._apply_op_helper("InplaceAdd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InplaceAdd", name: name, keywords: dict);
return op.output;
}
@@ -13874,7 +13874,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = tf._op_def_lib._apply_op_helper("InplaceSub", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InplaceSub", name: name, keywords: dict);
return op.output;
}
@@ -13905,7 +13905,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = tf._op_def_lib._apply_op_helper("InplaceUpdate", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InplaceUpdate", name: name, keywords: dict);
return op.output;
}
@@ -13927,7 +13927,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Inv", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Inv", name: name, keywords: dict);
return op.output;
}
@@ -13953,7 +13953,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["y"] = y;
dict["dy"] = dy;
- var op = tf._op_def_lib._apply_op_helper("InvGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InvGrad", name: name, keywords: dict);
return op.output;
}
@@ -13976,7 +13976,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Invert", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Invert", name: name, keywords: dict);
return op.output;
}
@@ -14014,7 +14014,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("InvertPermutation", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("InvertPermutation", name: name, keywords: dict);
return op.output;
}
@@ -14035,7 +14035,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = tf._op_def_lib._apply_op_helper("IsBoostedTreesEnsembleInitialized", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IsBoostedTreesEnsembleInitialized", name: name, keywords: dict);
return op.output;
}
@@ -14059,7 +14059,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("IsFinite", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IsFinite", name: name, keywords: dict);
return op.output;
}
@@ -14083,7 +14083,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("IsInf", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IsInf", name: name, keywords: dict);
return op.output;
}
@@ -14107,7 +14107,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("IsNan", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IsNan", name: name, keywords: dict);
return op.output;
}
@@ -14130,7 +14130,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["ref"] = referecne;
- var op = tf._op_def_lib._apply_op_helper("IsVariableInitialized", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IsVariableInitialized", name: name, keywords: dict);
return op.output;
}
@@ -14164,7 +14164,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("Iterator", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Iterator", name: name, keywords: dict);
return op.output;
}
@@ -14197,7 +14197,7 @@ namespace Tensorflow.Operations
dict["output_types"] = output_types;
if (output_shapes != null)
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("IteratorFromStringHandle", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IteratorFromStringHandle", name: name, keywords: dict);
return op.output;
}
@@ -14224,7 +14224,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("IteratorGetNext", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IteratorGetNext", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -14253,7 +14253,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("IteratorGetNextAsOptional", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IteratorGetNextAsOptional", name: name, keywords: dict);
return op.output;
}
@@ -14286,7 +14286,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("IteratorGetNextSync", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IteratorGetNextSync", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -14309,7 +14309,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["resource_handle"] = resource_handle;
- var op = tf._op_def_lib._apply_op_helper("IteratorToStringHandle", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("IteratorToStringHandle", name: name, keywords: dict);
return op.output;
}
@@ -14335,7 +14335,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["t"] = t;
- var op = tf._op_def_lib._apply_op_helper("L2Loss", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("L2Loss", name: name, keywords: dict);
return op.output;
}
@@ -14364,7 +14364,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("LMDBReader", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LMDBReader", name: name, keywords: dict);
return op.output;
}
@@ -14417,7 +14417,7 @@ namespace Tensorflow.Operations
dict["alpha"] = alpha.Value;
if (beta.HasValue)
dict["beta"] = beta.Value;
- var op = tf._op_def_lib._apply_op_helper("LRN", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LRN", name: name, keywords: dict);
return op.output;
}
@@ -14466,7 +14466,7 @@ namespace Tensorflow.Operations
dict["alpha"] = alpha.Value;
if (beta.HasValue)
dict["beta"] = beta.Value;
- var op = tf._op_def_lib._apply_op_helper("LRNGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LRNGrad", name: name, keywords: dict);
return op.output;
}
@@ -14496,7 +14496,7 @@ namespace Tensorflow.Operations
dict["tag"] = tag;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("LatencyStatsDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LatencyStatsDataset", name: name, keywords: dict);
return op.output;
}
@@ -14572,7 +14572,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("LearnedUnigramCandidateSampler", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LearnedUnigramCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -14602,7 +14602,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("LeftShift", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LeftShift", name: name, keywords: dict);
return op.output;
}
@@ -14628,7 +14628,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Less", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Less", name: name, keywords: dict);
return op.output;
}
@@ -14654,7 +14654,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("LessEqual", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LessEqual", name: name, keywords: dict);
return op.output;
}
@@ -14673,7 +14673,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Lgamma", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Lgamma", name: name, keywords: dict);
return op.output;
}
@@ -14713,7 +14713,7 @@ namespace Tensorflow.Operations
dict["start"] = start;
dict["stop"] = stop;
dict["num"] = num;
- var op = tf._op_def_lib._apply_op_helper("LinSpace", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LinSpace", name: name, keywords: dict);
return op.output;
}
@@ -14767,7 +14767,7 @@ namespace Tensorflow.Operations
dict["y"] = y;
if (out_idx.HasValue)
dict["out_idx"] = out_idx.Value;
- var op = tf._op_def_lib._apply_op_helper("ListDiff", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ListDiff", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var idx = op.outputs[_idx++];
@@ -14869,7 +14869,7 @@ namespace Tensorflow.Operations
dict["num_cols"] = num_cols;
if (max_rows_in_memory.HasValue)
dict["max_rows_in_memory"] = max_rows_in_memory.Value;
- var op = tf._op_def_lib._apply_op_helper("LoadAndRemapMatrix", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LoadAndRemapMatrix", name: name, keywords: dict);
return op.output;
}
@@ -14891,7 +14891,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Log", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Log", name: name, keywords: dict);
return op.output;
}
@@ -14913,7 +14913,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Log1p", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Log1p", name: name, keywords: dict);
return op.output;
}
@@ -14948,7 +14948,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("LogMatrixDeterminant", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogMatrixDeterminant", name: name, keywords: dict);
int _idx = 0;
var sign = op.outputs[_idx++];
var log_abs_determinant = op.outputs[_idx++];
@@ -14977,7 +14977,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["logits"] = logits;
- var op = tf._op_def_lib._apply_op_helper("LogSoftmax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogSoftmax", name: name, keywords: dict);
return op.output;
}
@@ -15053,7 +15053,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("LogUniformCandidateSampler", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogUniformCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -15083,7 +15083,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("LogicalAnd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogicalAnd", name: name, keywords: dict);
return op.output;
}
@@ -15102,7 +15102,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("LogicalNot", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogicalNot", name: name, keywords: dict);
return op.output;
}
@@ -15128,7 +15128,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("LogicalOr", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LogicalOr", name: name, keywords: dict);
return op.output;
}
@@ -15159,7 +15159,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["Tkeys"] = Tkeys;
dict["Tvalues"] = Tvalues;
- var op = tf._op_def_lib._apply_op_helper("LookupTableExport", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableExport", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -15193,7 +15193,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["Tkeys"] = Tkeys;
dict["Tvalues"] = Tvalues;
- var op = tf._op_def_lib._apply_op_helper("LookupTableExportV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableExportV2", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -15232,7 +15232,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["default_value"] = default_value;
- var op = tf._op_def_lib._apply_op_helper("LookupTableFind", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableFind", name: name, keywords: dict);
return op.output;
}
@@ -15268,7 +15268,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["default_value"] = default_value;
- var op = tf._op_def_lib._apply_op_helper("LookupTableFindV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableFindV2", name: name, keywords: dict);
return op.output;
}
@@ -15300,7 +15300,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("LookupTableImport", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableImport", name: name, keywords: dict);
return op;
}
@@ -15332,7 +15332,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("LookupTableImportV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableImportV2", name: name, keywords: dict);
return op;
}
@@ -15364,7 +15364,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("LookupTableInsert", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableInsert", name: name, keywords: dict);
return op;
}
@@ -15396,7 +15396,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = tf._op_def_lib._apply_op_helper("LookupTableInsertV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableInsertV2", name: name, keywords: dict);
return op;
}
@@ -15417,7 +15417,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["table_handle"] = table_handle;
- var op = tf._op_def_lib._apply_op_helper("LookupTableSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableSize", name: name, keywords: dict);
return op.output;
}
@@ -15438,7 +15438,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["table_handle"] = table_handle;
- var op = tf._op_def_lib._apply_op_helper("LookupTableSizeV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LookupTableSizeV2", name: name, keywords: dict);
return op.output;
}
@@ -15463,7 +15463,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("LoopCond", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("LoopCond", name: name, keywords: dict);
return op.output;
}
@@ -15489,7 +15489,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dataset"] = dataset;
dict["iterator"] = iterator;
- var op = tf._op_def_lib._apply_op_helper("MakeIterator", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MakeIterator", name: name, keywords: dict);
return op;
}
@@ -15525,7 +15525,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapClear", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapClear", name: name, keywords: dict);
return op;
}
@@ -15561,7 +15561,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapIncompleteSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -15607,7 +15607,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapPeek", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapPeek", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -15645,7 +15645,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapSize", name: name, keywords: dict);
return op.output;
}
@@ -15698,7 +15698,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapStage", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapStage", name: name, keywords: dict);
return op;
}
@@ -15744,7 +15744,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapUnstage", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapUnstage", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -15792,7 +15792,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MapUnstageNoKey", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MapUnstageNoKey", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -15836,7 +15836,7 @@ namespace Tensorflow.Operations
dict["transpose_a"] = transpose_a.Value;
if (transpose_b.HasValue)
dict["transpose_b"] = transpose_b.Value;
- var op = tf._op_def_lib._apply_op_helper("MatMul", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatMul", name: name, keywords: dict);
return op.output;
}
@@ -15862,7 +15862,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["pattern"] = pattern;
- var op = tf._op_def_lib._apply_op_helper("MatchingFiles", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatchingFiles", name: name, keywords: dict);
return op.output;
}
@@ -15934,7 +15934,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["num_lower"] = num_lower;
dict["num_upper"] = num_upper;
- var op = tf._op_def_lib._apply_op_helper("MatrixBandPart", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixBandPart", name: name, keywords: dict);
return op.output;
}
@@ -15960,7 +15960,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("MatrixDeterminant", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixDeterminant", name: name, keywords: dict);
return op.output;
}
@@ -16009,7 +16009,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["diagonal"] = diagonal;
- var op = tf._op_def_lib._apply_op_helper("MatrixDiag", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixDiag", name: name, keywords: dict);
return op.output;
}
@@ -16061,7 +16061,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("MatrixDiagPart", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixDiagPart", name: name, keywords: dict);
return op.output;
}
@@ -16080,7 +16080,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("MatrixExponential", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixExponential", name: name, keywords: dict);
return op.output;
}
@@ -16122,7 +16122,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = tf._op_def_lib._apply_op_helper("MatrixInverse", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixInverse", name: name, keywords: dict);
return op.output;
}
@@ -16164,7 +16164,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("MatrixLogarithm", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixLogarithm", name: name, keywords: dict);
return op.output;
}
@@ -16203,7 +16203,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["diagonal"] = diagonal;
- var op = tf._op_def_lib._apply_op_helper("MatrixSetDiag", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixSetDiag", name: name, keywords: dict);
return op.output;
}
@@ -16242,7 +16242,7 @@ namespace Tensorflow.Operations
dict["rhs"] = rhs;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = tf._op_def_lib._apply_op_helper("MatrixSolve", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixSolve", name: name, keywords: dict);
return op.output;
}
@@ -16315,7 +16315,7 @@ namespace Tensorflow.Operations
dict["l2_regularizer"] = l2_regularizer;
if (fast.HasValue)
dict["fast"] = fast.Value;
- var op = tf._op_def_lib._apply_op_helper("MatrixSolveLs", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixSolveLs", name: name, keywords: dict);
return op.output;
}
@@ -16373,7 +16373,7 @@ namespace Tensorflow.Operations
dict["lower"] = lower.Value;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = tf._op_def_lib._apply_op_helper("MatrixTriangularSolve", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MatrixTriangularSolve", name: name, keywords: dict);
return op.output;
}
@@ -16410,7 +16410,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("Max", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Max", name: name, keywords: dict);
return op.output;
}
@@ -16456,7 +16456,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPool", name: name, keywords: dict);
return op.output;
}
@@ -16503,7 +16503,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPool3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPool3D", name: name, keywords: dict);
return op.output;
}
@@ -16557,7 +16557,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPool3DGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPool3DGrad", name: name, keywords: dict);
return op.output;
}
@@ -16612,7 +16612,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPool3DGradGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPool3DGradGrad", name: name, keywords: dict);
return op.output;
}
@@ -16666,7 +16666,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -16720,7 +16720,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGradGrad", name: name, keywords: dict);
return op.output;
}
@@ -16772,7 +16772,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGradV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradV2", name: name, keywords: dict);
return op.output;
}
@@ -16818,7 +16818,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGradWithArgmax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradWithArgmax", name: name, keywords: dict);
return op.output;
}
@@ -16870,7 +16870,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGradV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGradV2", name: name, keywords: dict);
return op.output;
}
@@ -16916,7 +16916,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolGradWithArgmax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolGradWithArgmax", name: name, keywords: dict);
return op.output;
}
@@ -16960,7 +16960,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolV2", name: name, keywords: dict);
return op.output;
}
@@ -17013,7 +17013,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (Targmax.HasValue)
dict["Targmax"] = Targmax.Value;
- var op = tf._op_def_lib._apply_op_helper("MaxPoolWithArgmax", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MaxPoolWithArgmax", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var argmax = op.outputs[_idx++];
@@ -17042,7 +17042,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Maximum", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Maximum", name: name, keywords: dict);
return op.output;
}
@@ -17079,7 +17079,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("Mean", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Mean", name: name, keywords: dict);
return op.output;
}
@@ -17109,7 +17109,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
- var op = tf._op_def_lib._apply_op_helper("Merge", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Merge", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var value_index = op.outputs[_idx++];
@@ -17143,7 +17143,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
- var op = tf._op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MergeSummary", name: name, keywords: dict);
return op.output;
}
@@ -17183,7 +17183,7 @@ namespace Tensorflow.Operations
dict["destination_prefix"] = destination_prefix;
if (delete_old_dirs.HasValue)
dict["delete_old_dirs"] = delete_old_dirs.Value;
- var op = tf._op_def_lib._apply_op_helper("MergeV2Checkpoints", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MergeV2Checkpoints", name: name, keywords: dict);
return op;
}
@@ -17238,7 +17238,7 @@ namespace Tensorflow.Operations
dict["filterbank_channel_count"] = filterbank_channel_count.Value;
if (dct_coefficient_count.HasValue)
dict["dct_coefficient_count"] = dct_coefficient_count.Value;
- var op = tf._op_def_lib._apply_op_helper("Mfcc", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Mfcc", name: name, keywords: dict);
return op.output;
}
@@ -17275,7 +17275,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("Min", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Min", name: name, keywords: dict);
return op.output;
}
@@ -17301,7 +17301,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Minimum", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Minimum", name: name, keywords: dict);
return op.output;
}
@@ -17363,7 +17363,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["mode"] = mode;
- var op = tf._op_def_lib._apply_op_helper("MirrorPad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MirrorPad", name: name, keywords: dict);
return op.output;
}
@@ -17414,7 +17414,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["mode"] = mode;
- var op = tf._op_def_lib._apply_op_helper("MirrorPadGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MirrorPadGrad", name: name, keywords: dict);
return op.output;
}
@@ -17443,7 +17443,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Mod", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Mod", name: name, keywords: dict);
return op.output;
}
@@ -17469,7 +17469,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Mul", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Mul", name: name, keywords: dict);
return op.output;
}
@@ -17511,7 +17511,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (output_dtype.HasValue)
dict["output_dtype"] = output_dtype.Value;
- var op = tf._op_def_lib._apply_op_helper("Multinomial", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Multinomial", name: name, keywords: dict);
return op.output;
}
@@ -17579,7 +17579,7 @@ namespace Tensorflow.Operations
dict["initial_num_buckets"] = initial_num_buckets.Value;
if (max_load_factor.HasValue)
dict["max_load_factor"] = max_load_factor.Value;
- var op = tf._op_def_lib._apply_op_helper("MutableDenseHashTable", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableDenseHashTable", name: name, keywords: dict);
return op.output;
}
@@ -17647,7 +17647,7 @@ namespace Tensorflow.Operations
dict["initial_num_buckets"] = initial_num_buckets.Value;
if (max_load_factor.HasValue)
dict["max_load_factor"] = max_load_factor.Value;
- var op = tf._op_def_lib._apply_op_helper("MutableDenseHashTableV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableDenseHashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -17697,7 +17697,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = tf._op_def_lib._apply_op_helper("MutableHashTable", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableHashTable", name: name, keywords: dict);
return op.output;
}
@@ -17749,7 +17749,7 @@ namespace Tensorflow.Operations
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
if (value_shape != null)
dict["value_shape"] = value_shape;
- var op = tf._op_def_lib._apply_op_helper("MutableHashTableOfTensors", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableHashTableOfTensors", name: name, keywords: dict);
return op.output;
}
@@ -17801,7 +17801,7 @@ namespace Tensorflow.Operations
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
if (value_shape != null)
dict["value_shape"] = value_shape;
- var op = tf._op_def_lib._apply_op_helper("MutableHashTableOfTensorsV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableHashTableOfTensorsV2", name: name, keywords: dict);
return op.output;
}
@@ -17851,7 +17851,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = tf._op_def_lib._apply_op_helper("MutableHashTableV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutableHashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -17914,7 +17914,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["mutex"] = mutex;
- var op = tf._op_def_lib._apply_op_helper("MutexLock", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutexLock", name: name, keywords: dict);
return op.output;
}
@@ -17943,7 +17943,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("MutexV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("MutexV2", name: name, keywords: dict);
return op.output;
}
@@ -17965,7 +17965,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Neg", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Neg", name: name, keywords: dict);
return op.output;
}
@@ -18010,7 +18010,7 @@ namespace Tensorflow.Operations
dict["lr"] = lr;
dict["vocab_count"] = vocab_count;
dict["num_negative_samples"] = num_negative_samples;
- var op = tf._op_def_lib._apply_op_helper("NegTrain", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NegTrain", name: name, keywords: dict);
return op;
}
@@ -18031,7 +18031,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["data"] = data;
- var op = tf._op_def_lib._apply_op_helper("NextIteration", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NextIteration", name: name, keywords: dict);
return op.output;
}
@@ -18047,7 +18047,7 @@ namespace Tensorflow.Operations
public static Operation no_op (string name = "NoOp")
{
var dict = new Dictionary<string, object>();
- var op = tf._op_def_lib._apply_op_helper("NoOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NoOp", name: name, keywords: dict);
return op;
}
@@ -18103,7 +18103,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
if (iou_threshold.HasValue)
dict["iou_threshold"] = iou_threshold.Value;
- var op = tf._op_def_lib._apply_op_helper("NonMaxSuppression", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NonMaxSuppression", name: name, keywords: dict);
return op.output;
}
@@ -18160,7 +18160,7 @@ namespace Tensorflow.Operations
dict["scores"] = scores;
dict["max_output_size"] = max_output_size;
dict["iou_threshold"] = iou_threshold;
- var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NonMaxSuppressionV2", name: name, keywords: dict);
return op.output;
}
@@ -18221,7 +18221,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
dict["iou_threshold"] = iou_threshold;
dict["score_threshold"] = score_threshold;
- var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV3", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NonMaxSuppressionV3", name: name, keywords: dict);
return op.output;
}
@@ -18291,7 +18291,7 @@ namespace Tensorflow.Operations
dict["score_threshold"] = score_threshold;
if (pad_to_max_output_size.HasValue)
dict["pad_to_max_output_size"] = pad_to_max_output_size.Value;
- var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV4", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NonMaxSuppressionV4", name: name, keywords: dict);
int _idx = 0;
var selected_indices = op.outputs[_idx++];
var valid_outputs = op.outputs[_idx++];
@@ -18353,7 +18353,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
dict["overlap_threshold"] = overlap_threshold;
dict["score_threshold"] = score_threshold;
- var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionWithOverlaps", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NonMaxSuppressionWithOverlaps", name: name, keywords: dict);
return op.output;
}
@@ -18379,7 +18379,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("NotEqual", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NotEqual", name: name, keywords: dict);
return op.output;
}
@@ -18420,7 +18420,7 @@ namespace Tensorflow.Operations
dict["n"] = n;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = tf._op_def_lib._apply_op_helper("NthElement", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("NthElement", name: name, keywords: dict);
return op.output;
}
@@ -18552,7 +18552,7 @@ namespace Tensorflow.Operations
dict["off_value"] = off_value;
if (axis.HasValue)
dict["axis"] = axis.Value;
- var op = tf._op_def_lib._apply_op_helper("OneHot", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OneHot", name: name, keywords: dict);
return op.output;
}
@@ -18573,7 +18573,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("OnesLike", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OnesLike", name: name, keywords: dict);
return op.output;
}
@@ -18608,7 +18608,7 @@ namespace Tensorflow.Operations
dict["optimizations"] = optimizations;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("OptimizeDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OptimizeDataset", name: name, keywords: dict);
return op.output;
}
@@ -18627,7 +18627,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["components"] = components;
- var op = tf._op_def_lib._apply_op_helper("OptionalFromValue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OptionalFromValue", name: name, keywords: dict);
return op.output;
}
@@ -18654,7 +18654,7 @@ namespace Tensorflow.Operations
dict["optional"] = optional;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("OptionalGetValue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OptionalGetValue", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -18675,7 +18675,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["optional"] = optional;
- var op = tf._op_def_lib._apply_op_helper("OptionalHasValue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OptionalHasValue", name: name, keywords: dict);
return op.output;
}
@@ -18691,7 +18691,7 @@ namespace Tensorflow.Operations
public static Tensor optional_none (string name = "OptionalNone")
{
var dict = new Dictionary<string, object>();
- var op = tf._op_def_lib._apply_op_helper("OptionalNone", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OptionalNone", name: name, keywords: dict);
return op.output;
}
@@ -18727,7 +18727,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapClear", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapClear", name: name, keywords: dict);
return op;
}
@@ -18763,7 +18763,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapIncompleteSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -18810,7 +18810,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapPeek", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapPeek", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -18848,7 +18848,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapSize", name: name, keywords: dict);
return op.output;
}
@@ -18904,7 +18904,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapStage", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapStage", name: name, keywords: dict);
return op;
}
@@ -18950,7 +18950,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapUnstage", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapUnstage", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -18998,7 +18998,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("OrderedMapUnstageNoKey", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OrderedMapUnstageNoKey", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -19038,7 +19038,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = tf._op_def_lib._apply_op_helper("OutfeedDequeue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OutfeedDequeue", name: name, keywords: dict);
return op.output;
}
@@ -19076,7 +19076,7 @@ namespace Tensorflow.Operations
dict["shapes"] = shapes;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = tf._op_def_lib._apply_op_helper("OutfeedDequeueTuple", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OutfeedDequeueTuple", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -19098,7 +19098,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("OutfeedEnqueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OutfeedEnqueue", name: name, keywords: dict);
return op;
}
@@ -19119,7 +19119,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["inputs"] = inputs;
- var op = tf._op_def_lib._apply_op_helper("OutfeedEnqueueTuple", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("OutfeedEnqueueTuple", name: name, keywords: dict);
return op;
}
@@ -19167,7 +19167,7 @@ namespace Tensorflow.Operations
dict["values"] = values;
if (axis.HasValue)
dict["axis"] = axis.Value;
- var op = tf._op_def_lib._apply_op_helper("Pack", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Pack", name: name, keywords: dict);
return op.output;
}
@@ -19214,7 +19214,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["paddings"] = paddings;
- var op = tf._op_def_lib._apply_op_helper("Pad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Pad", name: name, keywords: dict);
return op.output;
}
@@ -19265,7 +19265,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["constant_values"] = constant_values;
- var op = tf._op_def_lib._apply_op_helper("PadV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PadV2", name: name, keywords: dict);
return op.output;
}
@@ -19305,7 +19305,7 @@ namespace Tensorflow.Operations
dict["padded_shapes"] = padded_shapes;
dict["padding_values"] = padding_values;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("PaddedBatchDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PaddedBatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -19350,7 +19350,7 @@ namespace Tensorflow.Operations
dict["padding_values"] = padding_values;
dict["drop_remainder"] = drop_remainder;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("PaddedBatchDatasetV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PaddedBatchDatasetV2", name: name, keywords: dict);
return op.output;
}
@@ -19407,7 +19407,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PaddingFIFOQueue", name: name, keywords: dict);
return op.output;
}
@@ -19464,7 +19464,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PaddingFIFOQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -19510,7 +19510,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["values"] = values;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("ParallelConcat", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParallelConcat", name: name, keywords: dict);
return op.output;
}
@@ -19595,7 +19595,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["indices"] = indices;
dict["data"] = data;
- var op = tf._op_def_lib._apply_op_helper("ParallelDynamicStitch", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParallelDynamicStitch", name: name, keywords: dict);
return op.output;
}
@@ -19650,7 +19650,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("ParameterizedTruncatedNormal", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParameterizedTruncatedNormal", name: name, keywords: dict);
return op.output;
}
@@ -19733,7 +19733,7 @@ namespace Tensorflow.Operations
dict["dense_defaults"] = dense_defaults;
dict["sparse_types"] = sparse_types;
dict["dense_shapes"] = dense_shapes;
- var op = tf._op_def_lib._apply_op_helper("ParseExample", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseExample", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -19807,7 +19807,7 @@ namespace Tensorflow.Operations
dict["dense_shapes"] = dense_shapes;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("ParseExampleDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseExampleDataset", name: name, keywords: dict);
return op.output;
}
@@ -19946,7 +19946,7 @@ namespace Tensorflow.Operations
dict["feature_list_sparse_types"] = feature_list_sparse_types;
if (feature_list_dense_shapes != null)
dict["feature_list_dense_shapes"] = feature_list_dense_shapes;
- var op = tf._op_def_lib._apply_op_helper("ParseSequenceExample", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseSequenceExample", name: name, keywords: dict);
int _idx = 0;
var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20033,7 +20033,7 @@ namespace Tensorflow.Operations
dict["dense_keys"] = dense_keys;
dict["sparse_types"] = sparse_types;
dict["dense_shapes"] = dense_shapes;
- var op = tf._op_def_lib._apply_op_helper("ParseSingleExample", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseSingleExample", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20155,7 +20155,7 @@ namespace Tensorflow.Operations
dict["feature_list_sparse_types"] = feature_list_sparse_types;
if (feature_list_dense_shapes != null)
dict["feature_list_dense_shapes"] = feature_list_dense_shapes;
- var op = tf._op_def_lib._apply_op_helper("ParseSingleSequenceExample", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseSingleSequenceExample", name: name, keywords: dict);
int _idx = 0;
var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20191,7 +20191,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["serialized"] = serialized;
dict["out_type"] = out_type;
- var op = tf._op_def_lib._apply_op_helper("ParseTensor", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ParseTensor", name: name, keywords: dict);
return op.output;
}
@@ -20224,7 +20224,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
if (shape != null)
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("Placeholder", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Placeholder", name: name, keywords: dict);
return op.output;
}
@@ -20257,7 +20257,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("PlaceholderV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PlaceholderV2", name: name, keywords: dict);
return op.output;
}
@@ -20283,7 +20283,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["shape"] = shape;
- var op = tf._op_def_lib._apply_op_helper("PlaceholderWithDefault", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name: name, keywords: dict);
return op.output;
}
@@ -20313,7 +20313,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("Polygamma", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Polygamma", name: name, keywords: dict);
return op.output;
}
@@ -20340,7 +20340,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = tf._op_def_lib._apply_op_helper("PopulationCount", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PopulationCount", name: name, keywords: dict);
return op.output;
}
@@ -20372,7 +20372,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = tf._op_def_lib._apply_op_helper("Pow", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Pow", name: name, keywords: dict);
return op.output;
}
@@ -20404,7 +20404,7 @@ namespace Tensorflow.Operations
dict["buffer_size"] = buffer_size;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("PrefetchDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PrefetchDataset", name: name, keywords: dict);
return op.output;
}
@@ -20440,7 +20440,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (message != null)
dict["message"] = message;
- var op = tf._op_def_lib._apply_op_helper("PreventGradient", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PreventGradient", name: name, keywords: dict);
return op.output;
}
@@ -20483,7 +20483,7 @@ namespace Tensorflow.Operations
dict["first_n"] = first_n.Value;
if (summarize.HasValue)
dict["summarize"] = summarize.Value;
- var op = tf._op_def_lib._apply_op_helper("Print", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Print", name: name, keywords: dict);
return op.output;
}
@@ -20538,7 +20538,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("PriorityQueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PriorityQueue", name: name, keywords: dict);
return op.output;
}
@@ -20593,7 +20593,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("PriorityQueueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("PriorityQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -20630,7 +20630,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = tf._op_def_lib._apply_op_helper("Prod", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Prod", name: name, keywords: dict);
return op.output;
}
@@ -20675,7 +20675,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (full_matrices.HasValue)
dict["full_matrices"] = full_matrices.Value;
- var op = tf._op_def_lib._apply_op_helper("Qr", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Qr", name: name, keywords: dict);
int _idx = 0;
var q = op.outputs[_idx++];
var r = op.outputs[_idx++];
@@ -20717,7 +20717,7 @@ namespace Tensorflow.Operations
dict["input_min"] = input_min.Value;
if (input_max.HasValue)
dict["input_max"] = input_max.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantize", name: name, keywords: dict);
return op.output;
}
@@ -20817,7 +20817,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (range_given.HasValue)
dict["range_given"] = range_given.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantizeV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV2", name: name, keywords: dict);
return op.output;
}
@@ -20857,7 +20857,7 @@ namespace Tensorflow.Operations
dict["signed_input"] = signed_input.Value;
if (range_given.HasValue)
dict["range_given"] = range_given.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantizeV3", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV3", name: name, keywords: dict);
return op.output;
}
@@ -20917,7 +20917,7 @@ namespace Tensorflow.Operations
dict["input_min"] = input_min;
dict["input_max"] = input_max;
dict["out_type"] = out_type;
- var op = tf._op_def_lib._apply_op_helper("QuantizeDownAndShrinkRange", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizeDownAndShrinkRange", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21064,7 +21064,7 @@ namespace Tensorflow.Operations
dict["mode"] = mode;
if (round_mode != null)
dict["round_mode"] = round_mode;
- var op = tf._op_def_lib._apply_op_helper("QuantizeV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizeV2", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21117,7 +21117,7 @@ namespace Tensorflow.Operations
dict["max_y"] = max_y;
if (Toutput.HasValue)
dict["Toutput"] = Toutput.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedAdd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedAdd", name: name, keywords: dict);
int _idx = 0;
var z = op.outputs[_idx++];
var min_z = op.outputs[_idx++];
@@ -21170,7 +21170,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("QuantizedAvgPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedAvgPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21280,7 +21280,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = tf._op_def_lib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name: name, keywords: dict);
int _idx = 0;
var result = op.outputs[_idx++];
var result_min = op.outputs[_idx++];
@@ -21334,7 +21334,7 @@ namespace Tensorflow.Operations
dict["min_bias"] = min_bias;
dict["max_bias"] = max_bias;
dict["out_type"] = out_type;
- var op = tf._op_def_lib._apply_op_helper("QuantizedBiasAdd", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedBiasAdd", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_out = op.outputs[_idx++];
@@ -21378,7 +21378,7 @@ namespace Tensorflow.Operations
dict["values"] = values;
dict["input_mins"] = input_mins;
dict["input_maxes"] = input_maxes;
- var op = tf._op_def_lib._apply_op_helper("QuantizedConcat", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedConcat", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21455,7 +21455,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type.Value;
if (dilations != null)
dict["dilations"] = dilations;
- var op = tf._op_def_lib._apply_op_helper("QuantizedConv2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedConv2D", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21518,7 +21518,7 @@ namespace Tensorflow.Operations
dict["variance_epsilon"] = variance_epsilon.Value;
if (min_separation.HasValue)
dict["min_separation"] = min_separation.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedInstanceNorm", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedInstanceNorm", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var y_min = op.outputs[_idx++];
@@ -21592,7 +21592,7 @@ namespace Tensorflow.Operations
dict["transpose_b"] = transpose_b.Value;
if (Tactivation.HasValue)
dict["Tactivation"] = Tactivation.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedMatMul", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedMatMul", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_out = op.outputs[_idx++];
@@ -21645,7 +21645,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = tf._op_def_lib._apply_op_helper("QuantizedMaxPool", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedMaxPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21698,7 +21698,7 @@ namespace Tensorflow.Operations
dict["max_y"] = max_y;
if (Toutput.HasValue)
dict["Toutput"] = Toutput.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedMul", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedMul", name: name, keywords: dict);
int _idx = 0;
var z = op.outputs[_idx++];
var min_z = op.outputs[_idx++];
@@ -21737,7 +21737,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedRelu", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedRelu", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21776,7 +21776,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedRelu6", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedRelu6", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21818,7 +21818,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedReluX", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedReluX", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21859,7 +21859,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
dict["input_min"] = input_min;
dict["input_max"] = input_max;
- var op = tf._op_def_lib._apply_op_helper("QuantizedReshape", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedReshape", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21908,7 +21908,7 @@ namespace Tensorflow.Operations
dict["max"] = max;
if (align_corners.HasValue)
dict["align_corners"] = align_corners.Value;
- var op = tf._op_def_lib._apply_op_helper("QuantizedResizeBilinear", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QuantizedResizeBilinear", name: name, keywords: dict);
int _idx = 0;
var resized_images = op.outputs[_idx++];
var out_min = op.outputs[_idx++];
@@ -21945,7 +21945,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueClose", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueClose", name: name, keywords: dict);
return op;
}
@@ -21978,7 +21978,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueCloseV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueCloseV2", name: name, keywords: dict);
return op;
}
@@ -22019,7 +22019,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeue", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22073,7 +22073,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeueMany", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeueMany", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22127,7 +22127,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeueManyV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeueManyV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22185,7 +22185,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeueUpTo", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeueUpTo", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22243,7 +22243,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeueUpToV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeueUpToV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22286,7 +22286,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueDequeueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueDequeueV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22326,7 +22326,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueEnqueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueEnqueue", name: name, keywords: dict);
return op;
}
@@ -22369,7 +22369,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueEnqueueMany", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueEnqueueMany", name: name, keywords: dict);
return op;
}
@@ -22412,7 +22412,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueEnqueueManyV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueEnqueueManyV2", name: name, keywords: dict);
return op;
}
@@ -22450,7 +22450,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = tf._op_def_lib._apply_op_helper("QueueEnqueueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueEnqueueV2", name: name, keywords: dict);
return op;
}
@@ -22474,7 +22474,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("QueueIsClosed", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueIsClosed", name: name, keywords: dict);
return op.output;
}
@@ -22498,7 +22498,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("QueueIsClosedV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueIsClosedV2", name: name, keywords: dict);
return op.output;
}
@@ -22519,7 +22519,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("QueueSize", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueSize", name: name, keywords: dict);
return op.output;
}
@@ -22540,7 +22540,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = tf._op_def_lib._apply_op_helper("QueueSizeV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("QueueSizeV2", name: name, keywords: dict);
return op.output;
}
@@ -22583,7 +22583,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("RFFT", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RFFT", name: name, keywords: dict);
return op.output;
}
@@ -22628,7 +22628,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("RFFT2D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -22673,7 +22673,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = tf._op_def_lib._apply_op_helper("RFFT3D", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -22703,7 +22703,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["images"] = images;
- var op = tf._op_def_lib._apply_op_helper("RGBToHSV", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RGBToHSV", name: name, keywords: dict);
return op.output;
}
@@ -22748,7 +22748,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomCrop", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomCrop", name: name, keywords: dict);
return op.output;
}
@@ -22782,7 +22782,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("RandomDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomDataset", name: name, keywords: dict);
return op.output;
}
@@ -22828,7 +22828,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomGamma", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomGamma", name: name, keywords: dict);
return op.output;
}
@@ -22850,7 +22850,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["alpha"] = alpha;
dict["sample"] = sample;
- var op = tf._op_def_lib._apply_op_helper("RandomGammaGrad", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomGammaGrad", name: name, keywords: dict);
return op.output;
}
@@ -22880,7 +22880,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomPoisson", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomPoisson", name: name, keywords: dict);
return op.output;
}
@@ -22936,7 +22936,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomPoissonV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomPoissonV2", name: name, keywords: dict);
return op.output;
}
@@ -22981,7 +22981,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomShuffle", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomShuffle", name: name, keywords: dict);
return op.output;
}
@@ -23047,7 +23047,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("RandomShuffleQueue", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomShuffleQueue", name: name, keywords: dict);
return op.output;
}
@@ -23113,7 +23113,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = tf._op_def_lib._apply_op_helper("RandomShuffleQueueV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomShuffleQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -23154,7 +23154,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomStandardNormal", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomStandardNormal", name: name, keywords: dict);
return op.output;
}
@@ -23196,7 +23196,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomUniform", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomUniform", name: name, keywords: dict);
return op.output;
}
@@ -23246,7 +23246,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = tf._op_def_lib._apply_op_helper("RandomUniformInt", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RandomUniformInt", name: name, keywords: dict);
return op.output;
}
@@ -23288,7 +23288,7 @@ namespace Tensorflow.Operations
dict["start"] = start;
dict["limit"] = limit;
dict["delta"] = delta;
- var op = tf._op_def_lib._apply_op_helper("Range", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Range", name: name, keywords: dict);
return op.output;
}
@@ -23324,7 +23324,7 @@ namespace Tensorflow.Operations
dict["step"] = step;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = tf._op_def_lib._apply_op_helper("RangeDataset", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("RangeDataset", name: name, keywords: dict);
return op.output;
}
@@ -23358,7 +23358,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = tf._op_def_lib._apply_op_helper("Rank", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("Rank", name: name, keywords: dict);
return op.output;
}
@@ -23377,7 +23377,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["filename"] = filename;
- var op = tf._op_def_lib._apply_op_helper("ReadFile", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReadFile", name: name, keywords: dict);
return op.output;
}
@@ -23410,7 +23410,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
dict["dtype"] = dtype;
- var op = tf._op_def_lib._apply_op_helper("ReadVariableOp", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReadVariableOp", name: name, keywords: dict);
return op.output;
}
@@ -23434,7 +23434,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = tf._op_def_lib._apply_op_helper("ReaderNumRecordsProduced", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProduced", name: name, keywords: dict);
return op.output;
}
@@ -23458,7 +23458,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = tf._op_def_lib._apply_op_helper("ReaderNumRecordsProducedV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProducedV2", name: name, keywords: dict);
return op.output;
}
@@ -23478,7 +23478,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = tf._op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompleted", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompleted", name: name, keywords: dict);
return op.output;
}
@@ -23498,7 +23498,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = tf._op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name: name, keywords: dict);
return op.output;
}
@@ -23530,7 +23530,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
- var op = tf._op_def_lib._apply_op_helper("ReaderRead", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderRead", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var value = op.outputs[_idx++];
@@ -23570,7 +23570,7 @@ namespace Tensorflow.Operations
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
dict["num_records"] = num_records;
- var op = tf._op_def_lib._apply_op_helper("ReaderReadUpTo", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderReadUpTo", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -23610,7 +23610,7 @@ namespace Tensorflow.Operations
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
dict["num_records"] = num_records;
- var op = tf._op_def_lib._apply_op_helper("ReaderReadUpToV2", name: name, keywords: dict);
+ var op = tf.OpDefLib._apply_op_helper("ReaderReadUpToV2", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -23645,7 +23645,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary