//
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
//
// NOTE(review): this file is codegen output — fix bugs in config.proto and
// re-run protoc; hand edits here are lost on regeneration.  The chunk was
// whitespace-mangled on import; line breaks below are restored, tokens are
// unchanged.  Generic type arguments (e.g. pb::IMessage<GPUOptions>) appear
// to have been stripped by the extraction — TODO confirm against a fresh
// protoc run.
#pragma warning disable 1591, 0612, 3021
#region Designer generated code

using pb = global::Google.Protobuf;
using pbc = global::Google.Protobuf.Collections;
using pbr = global::Google.Protobuf.Reflection;
using scg = global::System.Collections.Generic;
namespace Tensorflow {

  /// <summary>Holder for reflection information generated from tensorflow/core/protobuf/config.proto</summary>
  public static partial class ConfigReflection {

    #region Descriptor
    /// <summary>File descriptor for tensorflow/core/protobuf/config.proto</summary>
    public static pbr::FileDescriptor Descriptor {
      get { return descriptor; }
    }
    private static pbr::FileDescriptor descriptor;

    static ConfigReflection() {
      // descriptorData is the base64-encoded serialized FileDescriptorProto
      // for config.proto; do not modify by hand.
      byte[] descriptorData = global::System.Convert.FromBase64String(
          string.Concat(
            "CiV0ZW5zb3JmbG93L2NvcmUvcHJvdG9idWYvY29uZmlnLnByb3RvEgp0ZW5z",
            "b3JmbG93Gip0ZW5zb3JmbG93L2NvcmUvZnJhbWV3b3JrL2Nvc3RfZ3JhcGgu",
            "cHJvdG8aJXRlbnNvcmZsb3cvY29yZS9mcmFtZXdvcmsvZ3JhcGgucHJvdG8a",
            "KnRlbnNvcmZsb3cvY29yZS9mcmFtZXdvcmsvc3RlcF9zdGF0cy5wcm90bxom",
            "dGVuc29yZmxvdy9jb3JlL3Byb3RvYnVmL2NsdXN0ZXIucHJvdG8aJHRlbnNv",
            "cmZsb3cvY29yZS9wcm90b2J1Zi9kZWJ1Zy5wcm90bxoudGVuc29yZmxvdy9j",
            "b3JlL3Byb3RvYnVmL3Jld3JpdGVyX2NvbmZpZy5wcm90byK3BQoKR1BVT3B0",
            "aW9ucxInCh9wZXJfcHJvY2Vzc19ncHVfbWVtb3J5X2ZyYWN0aW9uGAEgASgB",
            "EhQKDGFsbG93X2dyb3d0aBgEIAEoCBIWCg5hbGxvY2F0b3JfdHlwZRgCIAEo",
            "CRIfChdkZWZlcnJlZF9kZWxldGlvbl9ieXRlcxgDIAEoAxIbChN2aXNpYmxl",
            "X2RldmljZV9saXN0GAUgASgJEiIKGnBvbGxpbmdfYWN0aXZlX2RlbGF5X3Vz",
            "ZWNzGAYgASgFEiQKHHBvbGxpbmdfaW5hY3RpdmVfZGVsYXlfbXNlY3MYByAB",
            "KAUSHAoUZm9yY2VfZ3B1X2NvbXBhdGlibGUYCCABKAgSOQoMZXhwZXJpbWVu",
            "dGFsGAkgASgLMiMudGVuc29yZmxvdy5HUFVPcHRpb25zLkV4cGVyaW1lbnRh",
            "bBrwAgoMRXhwZXJpbWVudGFsEksKD3ZpcnR1YWxfZGV2aWNlcxgBIAMoCzIy",
            "LnRlbnNvcmZsb3cuR1BVT3B0aW9ucy5FeHBlcmltZW50YWwuVmlydHVhbERl",
            "dmljZXMSGgoSdXNlX3VuaWZpZWRfbWVtb3J5GAIgASgIEiMKG251bV9kZXZf",
            "dG9fZGV2X2NvcHlfc3RyZWFtcxgDIAEoBRIdChVjb2xsZWN0aXZlX3Jpbmdf",
            "b3JkZXIYBCABKAkSHQoVdGltZXN0YW1wZWRfYWxsb2NhdG9yGAUgASgIEiMK",
            "G2tlcm5lbF90cmFja2VyX21heF9pbnRlcnZhbBgHIAEoBRIgChhrZXJuZWxf",
            "dHJhY2tlcl9tYXhfYnl0ZXMYCCABKAUSIgoaa2VybmVsX3RyYWNrZXJfbWF4",
            "X3BlbmRpbmcYCSABKAUaKQoOVmlydHVhbERldmljZXMSFwoPbWVtb3J5X2xp",
            "bWl0X21iGAEgAygCIoUDChBPcHRpbWl6ZXJPcHRpb25zEisKI2RvX2NvbW1v",
            "bl9zdWJleHByZXNzaW9uX2VsaW1pbmF0aW9uGAEgASgIEhsKE2RvX2NvbnN0",
            "YW50X2ZvbGRpbmcYAiABKAgSJAocbWF4X2ZvbGRlZF9jb25zdGFudF9pbl9i",
            "eXRlcxgGIAEoAxIcChRkb19mdW5jdGlvbl9pbmxpbmluZxgEIAEoCBI1Cglv",
            "cHRfbGV2ZWwYAyABKA4yIi50ZW5zb3JmbG93Lk9wdGltaXplck9wdGlvbnMu",
            "TGV2ZWwSRQoQZ2xvYmFsX2ppdF9sZXZlbBgFIAEoDjIrLnRlbnNvcmZsb3cu",
            "T3B0aW1pemVyT3B0aW9ucy5HbG9iYWxKaXRMZXZlbCIgCgVMZXZlbBIGCgJM",
            "MRAAEg8KAkwwEP///////////wEiQwoOR2xvYmFsSml0TGV2ZWwSCwoHREVG",
            "QVVMVBAAEhAKA09GRhD///////////8BEggKBE9OXzEQARIICgRPTl8yEAIi",
            "7gIKDEdyYXBoT3B0aW9ucxIeChZlbmFibGVfcmVjdl9zY2hlZHVsaW5nGAIg",
            "ASgIEjcKEW9wdGltaXplcl9vcHRpb25zGAMgASgLMhwudGVuc29yZmxvdy5P",
            "cHRpbWl6ZXJPcHRpb25zEhgKEGJ1aWxkX2Nvc3RfbW9kZWwYBCABKAMSHgoW",
            "YnVpbGRfY29zdF9tb2RlbF9hZnRlchgJIAEoAxIUCgxpbmZlcl9zaGFwZXMY",
            "BSABKAgSGgoScGxhY2VfcHJ1bmVkX2dyYXBoGAYgASgIEiAKGGVuYWJsZV9i",
            "ZmxvYXQxNl9zZW5kcmVjdhgHIAEoCBIVCg10aW1lbGluZV9zdGVwGAggASgF",
            "EjMKD3Jld3JpdGVfb3B0aW9ucxgKIAEoCzIaLnRlbnNvcmZsb3cuUmV3cml0",
            "ZXJDb25maWdKBAgBEAJSJXNraXBfY29tbW9uX3N1YmV4cHJlc3Npb25fZWxp",
            "bWluYXRpb24iQQoVVGhyZWFkUG9vbE9wdGlvblByb3RvEhMKC251bV90aHJl",
            "YWRzGAEgASgFEhMKC2dsb2JhbF9uYW1lGAIgASgJImwKClJQQ09wdGlvbnMS",
            "JAocdXNlX3JwY19mb3JfaW5wcm9jZXNzX21hc3RlchgBIAEoCBIdChVjb21w",
            "cmVzc2lvbl9hbGdvcml0aG0YAiABKAkSGQoRY29tcHJlc3Npb25fbGV2ZWwY",
            "AyABKAUisggKC0NvbmZpZ1Byb3RvEj4KDGRldmljZV9jb3VudBgBIAMoCzIo",
            "LnRlbnNvcmZsb3cuQ29uZmlnUHJvdG8uRGV2aWNlQ291bnRFbnRyeRIkChxp",
            "bnRyYV9vcF9wYXJhbGxlbGlzbV90aHJlYWRzGAIgASgFEiQKHGludGVyX29w",
            "X3BhcmFsbGVsaXNtX3RocmVhZHMYBSABKAUSHwoXdXNlX3Blcl9zZXNzaW9u",
            "X3RocmVhZHMYCSABKAgSRwocc2Vzc2lvbl9pbnRlcl9vcF90aHJlYWRfcG9v",
            "bBgMIAMoCzIhLnRlbnNvcmZsb3cuVGhyZWFkUG9vbE9wdGlvblByb3RvEhgK",
            "EHBsYWNlbWVudF9wZXJpb2QYAyABKAUSFgoOZGV2aWNlX2ZpbHRlcnMYBCAD",
            "KAkSKwoLZ3B1X29wdGlvbnMYBiABKAsyFi50ZW5zb3JmbG93LkdQVU9wdGlv",
            "bnMSHAoUYWxsb3dfc29mdF9wbGFjZW1lbnQYByABKAgSHAoUbG9nX2Rldmlj",
            "ZV9wbGFjZW1lbnQYCCABKAgSLwoNZ3JhcGhfb3B0aW9ucxgKIAEoCzIYLnRl",
            "bnNvcmZsb3cuR3JhcGhPcHRpb25zEh8KF29wZXJhdGlvbl90aW1lb3V0X2lu",
            "X21zGAsgASgDEisKC3JwY19vcHRpb25zGA0gASgLMhYudGVuc29yZmxvdy5S",
            "UENPcHRpb25zEisKC2NsdXN0ZXJfZGVmGA4gASgLMhYudGVuc29yZmxvdy5D",
            "bHVzdGVyRGVmEh0KFWlzb2xhdGVfc2Vzc2lvbl9zdGF0ZRgPIAEoCBI6Cgxl",
            "eHBlcmltZW50YWwYECABKAsyJC50ZW5zb3JmbG93LkNvbmZpZ1Byb3RvLkV4",
            "cGVyaW1lbnRhbBoyChBEZXZpY2VDb3VudEVudHJ5EgsKA2tleRgBIAEoCRIN",
            "CgV2YWx1ZRgCIAEoBToCOAEa1gIKDEV4cGVyaW1lbnRhbBIfChdjb2xsZWN0",
            "aXZlX2dyb3VwX2xlYWRlchgBIAEoCRIVCg1leGVjdXRvcl90eXBlGAMgASgJ",
            "EhoKEnJlY3ZfYnVmX21heF9jaHVuaxgEIAEoBRIZChF1c2VfbnVtYV9hZmZp",
            "bml0eRgFIAEoCBI1Ci1jb2xsZWN0aXZlX2RldGVybWluaXN0aWNfc2VxdWVu",
            "dGlhbF9leGVjdXRpb24YBiABKAgSFwoPY29sbGVjdGl2ZV9uY2NsGAcgASgI",
            "EjYKLnNoYXJlX3Nlc3Npb25fc3RhdGVfaW5fY2x1c3RlcnNwZWNfcHJvcGFn",
            "YXRpb24YCCABKAgSHwoXZGlzYWJsZV90aHJlYWRfc3Bpbm5pbmcYCSABKAgS",
            "KAogc2hhcmVfY2x1c3Rlcl9kZXZpY2VzX2luX3Nlc3Npb24YCiABKAhKBAgC",
            "EAMi2AMKClJ1bk9wdGlvbnMSNgoLdHJhY2VfbGV2ZWwYASABKA4yIS50ZW5z",
            "b3JmbG93LlJ1bk9wdGlvbnMuVHJhY2VMZXZlbBIVCg10aW1lb3V0X2luX21z",
            "GAIgASgDEhwKFGludGVyX29wX3RocmVhZF9wb29sGAMgASgFEh8KF291dHB1",
            "dF9wYXJ0aXRpb25fZ3JhcGhzGAUgASgIEi8KDWRlYnVnX29wdGlvbnMYBiAB",
            "KAsyGC50ZW5zb3JmbG93LkRlYnVnT3B0aW9ucxIqCiJyZXBvcnRfdGVuc29y",
            "X2FsbG9jYXRpb25zX3Vwb25fb29tGAcgASgIEjkKDGV4cGVyaW1lbnRhbBgI",
            "IAEoCzIjLnRlbnNvcmZsb3cuUnVuT3B0aW9ucy5FeHBlcmltZW50YWwaSgoM",
            "RXhwZXJpbWVudGFsEhwKFGNvbGxlY3RpdmVfZ3JhcGhfa2V5GAEgASgDEhwK",
            "FHVzZV9ydW5faGFuZGxlcl9wb29sGAIgASgIIlIKClRyYWNlTGV2ZWwSDAoI",
            "Tk9fVFJBQ0UQABISCg5TT0ZUV0FSRV9UUkFDRRABEhIKDkhBUkRXQVJFX1RS",
            "QUNFEAISDgoKRlVMTF9UUkFDRRADSgQIBBAFIocDCgtSdW5NZXRhZGF0YRIp",
            "CgpzdGVwX3N0YXRzGAEgASgLMhUudGVuc29yZmxvdy5TdGVwU3RhdHMSLAoK",
            "Y29zdF9ncmFwaBgCIAEoCzIYLnRlbnNvcmZsb3cuQ29zdEdyYXBoRGVmEi4K",
            "EHBhcnRpdGlvbl9ncmFwaHMYAyADKAsyFC50ZW5zb3JmbG93LkdyYXBoRGVm",
            "Ej8KD2Z1bmN0aW9uX2dyYXBocxgEIAMoCzImLnRlbnNvcmZsb3cuUnVuTWV0",
            "YWRhdGEuRnVuY3Rpb25HcmFwaHMarQEKDkZ1bmN0aW9uR3JhcGhzEi4KEHBh",
            "cnRpdGlvbl9ncmFwaHMYASADKAsyFC50ZW5zb3JmbG93LkdyYXBoRGVmEjQK",
            "FnByZV9vcHRpbWl6YXRpb25fZ3JhcGgYAiABKAsyFC50ZW5zb3JmbG93Lkdy",
            "YXBoRGVmEjUKF3Bvc3Rfb3B0aW1pemF0aW9uX2dyYXBoGAMgASgLMhQudGVu",
            "c29yZmxvdy5HcmFwaERlZiI6ChBUZW5zb3JDb25uZWN0aW9uEhMKC2Zyb21f",
            "dGVuc29yGAEgASgJEhEKCXRvX3RlbnNvchgCIAEoCSKwAwoPQ2FsbGFibGVP",
            "cHRpb25zEgwKBGZlZWQYASADKAkSDQoFZmV0Y2gYAiADKAkSDgoGdGFyZ2V0",
            "GAMgAygJEisKC3J1bl9vcHRpb25zGAQgASgLMhYudGVuc29yZmxvdy5SdW5P",
            "cHRpb25zEjcKEXRlbnNvcl9jb25uZWN0aW9uGAUgAygLMhwudGVuc29yZmxv",
            "dy5UZW5zb3JDb25uZWN0aW9uEkIKDGZlZWRfZGV2aWNlcxgGIAMoCzIsLnRl",
            "bnNvcmZsb3cuQ2FsbGFibGVPcHRpb25zLkZlZWREZXZpY2VzRW50cnkSRAoN",
            "ZmV0Y2hfZGV2aWNlcxgHIAMoCzItLnRlbnNvcmZsb3cuQ2FsbGFibGVPcHRp",
            "b25zLkZldGNoRGV2aWNlc0VudHJ5EhcKD2ZldGNoX3NraXBfc3luYxgIIAEo",
            "CBoyChBGZWVkRGV2aWNlc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgC",
            "IAEoCToCOAEaMwoRRmV0Y2hEZXZpY2VzRW50cnkSCwoDa2V5GAEgASgJEg0K",
            "BXZhbHVlGAIgASgJOgI4AUItChhvcmcudGVuc29yZmxvdy5mcmFtZXdvcmtC",
            "DENvbmZpZ1Byb3Rvc1AB+AEBYgZwcm90bzM="));
      // Dependencies must match the imports declared in config.proto, in order.
      descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
          new pbr::FileDescriptor[] { global::Tensorflow.CostGraphReflection.Descriptor, global::Tensorflow.GraphReflection.Descriptor, global::Tensorflow.StepStatsReflection.Descriptor, global::Tensorflow.ClusterReflection.Descriptor, global::Tensorflow.DebugReflection.Descriptor, global::Tensorflow.RewriterConfigReflection.Descriptor, },
          // One GeneratedClrTypeInfo per top-level message, each listing its CLR
          // property names in field order, plus nested enums/messages.
          new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.GPUOptions), global::Tensorflow.GPUOptions.Parser, new[]{ "PerProcessGpuMemoryFraction", "AllowGrowth", "AllocatorType", "DeferredDeletionBytes", "VisibleDeviceList", "PollingActiveDelayUsecs", "PollingInactiveDelayMsecs", "ForceGpuCompatible", "Experimental" }, null, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.GPUOptions.Types.Experimental), global::Tensorflow.GPUOptions.Types.Experimental.Parser, new[]{ "VirtualDevices", "UseUnifiedMemory", "NumDevToDevCopyStreams", "CollectiveRingOrder", "TimestampedAllocator", "KernelTrackerMaxInterval", "KernelTrackerMaxBytes", "KernelTrackerMaxPending" }, null, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.GPUOptions.Types.Experimental.Types.VirtualDevices), global::Tensorflow.GPUOptions.Types.Experimental.Types.VirtualDevices.Parser, new[]{ "MemoryLimitMb" }, null, null, null)})}),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.OptimizerOptions), global::Tensorflow.OptimizerOptions.Parser, new[]{ "DoCommonSubexpressionElimination", "DoConstantFolding", "MaxFoldedConstantInBytes", "DoFunctionInlining", "OptLevel", "GlobalJitLevel" }, null, new[]{ typeof(global::Tensorflow.OptimizerOptions.Types.Level), typeof(global::Tensorflow.OptimizerOptions.Types.GlobalJitLevel) }, null),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.GraphOptions), global::Tensorflow.GraphOptions.Parser, new[]{ "EnableRecvScheduling", "OptimizerOptions", "BuildCostModel", "BuildCostModelAfter", "InferShapes", "PlacePrunedGraph", "EnableBfloat16Sendrecv", "TimelineStep", "RewriteOptions" }, null, null, null),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.ThreadPoolOptionProto), global::Tensorflow.ThreadPoolOptionProto.Parser, new[]{ "NumThreads", "GlobalName" }, null, null, null),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.RPCOptions), global::Tensorflow.RPCOptions.Parser, new[]{ "UseRpcForInprocessMaster", "CompressionAlgorithm", "CompressionLevel" }, null, null, null),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.ConfigProto), global::Tensorflow.ConfigProto.Parser, new[]{ "DeviceCount", "IntraOpParallelismThreads", "InterOpParallelismThreads", "UsePerSessionThreads", "SessionInterOpThreadPool", "PlacementPeriod", "DeviceFilters", "GpuOptions", "AllowSoftPlacement", "LogDevicePlacement", "GraphOptions", "OperationTimeoutInMs", "RpcOptions", "ClusterDef", "IsolateSessionState", "Experimental" }, null, null, new pbr::GeneratedClrTypeInfo[] { null, new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.ConfigProto.Types.Experimental), global::Tensorflow.ConfigProto.Types.Experimental.Parser, new[]{ "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession" }, null, null, null)}),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.RunOptions), global::Tensorflow.RunOptions.Parser, new[]{ "TraceLevel", "TimeoutInMs", "InterOpThreadPool", "OutputPartitionGraphs", "DebugOptions", "ReportTensorAllocationsUponOom", "Experimental" }, null, new[]{ typeof(global::Tensorflow.RunOptions.Types.TraceLevel) }, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.RunOptions.Types.Experimental), global::Tensorflow.RunOptions.Types.Experimental.Parser, new[]{ "CollectiveGraphKey", "UseRunHandlerPool" }, null, null, null)}),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.RunMetadata), global::Tensorflow.RunMetadata.Parser, new[]{ "StepStats", "CostGraph", "PartitionGraphs", "FunctionGraphs" }, null, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.RunMetadata.Types.FunctionGraphs), global::Tensorflow.RunMetadata.Types.FunctionGraphs.Parser, new[]{ "PartitionGraphs", "PreOptimizationGraph", "PostOptimizationGraph" }, null, null, null)}),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.TensorConnection), global::Tensorflow.TensorConnection.Parser, new[]{ "FromTensor", "ToTensor" }, null, null, null),
            new pbr::GeneratedClrTypeInfo(typeof(global::Tensorflow.CallableOptions), global::Tensorflow.CallableOptions.Parser, new[]{ "Feed", "Fetch", "Target", "RunOptions", "TensorConnection", "FeedDevices", "FetchDevices", "FetchSkipSync" }, null, null, new pbr::GeneratedClrTypeInfo[] { null, null, })
          }));
    }
    #endregion

  }
  #region Messages
  public sealed partial class GPUOptions : pb::IMessage {
    private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new GPUOptions());
    private pb::UnknownFieldSet _unknownFields;
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static pb::MessageParser Parser { get { return _parser; } }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static pbr::MessageDescriptor Descriptor {
      get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[0]; }
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    pbr::MessageDescriptor pb::IMessage.Descriptor {
      get { return Descriptor; }
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public GPUOptions() {
      OnConstruction();
    }

    partial void OnConstruction();

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public GPUOptions(GPUOptions other) : this() {
      perProcessGpuMemoryFraction_ = other.perProcessGpuMemoryFraction_;
      allowGrowth_ = other.allowGrowth_;
      allocatorType_ = other.allocatorType_;
      deferredDeletionBytes_ = other.deferredDeletionBytes_;
      visibleDeviceList_ = other.visibleDeviceList_;
      pollingActiveDelayUsecs_ = other.pollingActiveDelayUsecs_;
      pollingInactiveDelayMsecs_ = other.pollingInactiveDelayMsecs_;
      forceGpuCompatible_ = other.forceGpuCompatible_;
      // Deep-copy the nested message; scalar/string fields above are copied by value.
      experimental_ = other.experimental_ != null ? other.experimental_.Clone() : null;
      _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public GPUOptions Clone() {
      return new GPUOptions(this);
    }

    /// <summary>Field number for the "per_process_gpu_memory_fraction" field.</summary>
public const int PerProcessGpuMemoryFractionFieldNumber = 1; private double perProcessGpuMemoryFraction_; /// /// Fraction of the available GPU memory to allocate for each process. /// 1 means to allocate all of the GPU memory, 0.5 means the process /// allocates up to ~50% of the available GPU memory. /// /// GPU memory is pre-allocated unless the allow_growth option is enabled. /// /// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe /// the amount of memory available on the GPU device by using host memory as a /// swap space. Accessing memory not available on the device will be /// significantly slower as that would require memory transfer between the host /// and the device. Options to reduce the memory requirement should be /// considered before enabling this option as this may come with a negative /// performance impact. Oversubscription using the unified memory requires /// Pascal class or newer GPUs and it is currently only supported on the Linux /// operating system. See /// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements /// for the detailed requirements. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public double PerProcessGpuMemoryFraction { get { return perProcessGpuMemoryFraction_; } set { perProcessGpuMemoryFraction_ = value; } } /// Field number for the "allow_growth" field. public const int AllowGrowthFieldNumber = 4; private bool allowGrowth_; /// /// If true, the allocator does not pre-allocate the entire specified /// GPU memory region, instead starting small and growing as needed. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool AllowGrowth { get { return allowGrowth_; } set { allowGrowth_ = value; } } /// Field number for the "allocator_type" field. public const int AllocatorTypeFieldNumber = 2; private string allocatorType_ = ""; /// /// The type of GPU allocation strategy to use. 
/// /// Allowed values: /// "": The empty string (default) uses a system-chosen default /// which may change over time. /// /// "BFC": A "Best-fit with coalescing" algorithm, simplified from a /// version of dlmalloc. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public string AllocatorType { get { return allocatorType_; } set { allocatorType_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); } } /// Field number for the "deferred_deletion_bytes" field. public const int DeferredDeletionBytesFieldNumber = 3; private long deferredDeletionBytes_; /// /// Delay deletion of up to this many bytes to reduce the number of /// interactions with gpu driver code. If 0, the system chooses /// a reasonable default (several MBs). /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public long DeferredDeletionBytes { get { return deferredDeletionBytes_; } set { deferredDeletionBytes_ = value; } } /// Field number for the "visible_device_list" field. public const int VisibleDeviceListFieldNumber = 5; private string visibleDeviceList_ = ""; /// /// A comma-separated list of GPU ids that determines the 'visible' /// to 'virtual' mapping of GPU devices. For example, if TensorFlow /// can see 8 GPU devices in the process, and one wanted to map /// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1", /// then one would specify this field as "5,3". This field is similar in /// spirit to the CUDA_VISIBLE_DEVICES environment variable, except /// it applies to the visible GPU devices in the process. /// /// NOTE: /// 1. The GPU driver provides the process with the visible GPUs /// in an order which is not guaranteed to have any correlation to /// the *physical* GPU id in the machine. This field is used for /// remapping "visible" to "virtual", which means this operates only /// after the process starts. 
Users are required to use vendor /// specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the /// physical to visible device mapping prior to invoking TensorFlow. /// 2. In the code, the ids in this list are also called "platform GPU id"s, /// and the 'virtual' ids of GPU devices (i.e. the ids in the device /// name "/device:GPU:<id>") are also called "TF GPU id"s. Please /// refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h /// for more information. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public string VisibleDeviceList { get { return visibleDeviceList_; } set { visibleDeviceList_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); } } /// Field number for the "polling_active_delay_usecs" field. public const int PollingActiveDelayUsecsFieldNumber = 6; private int pollingActiveDelayUsecs_; /// /// In the event polling loop sleep this many microseconds between /// PollEvents calls, when the queue is not empty. If value is not /// set or set to 0, gets set to a non-zero default. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int PollingActiveDelayUsecs { get { return pollingActiveDelayUsecs_; } set { pollingActiveDelayUsecs_ = value; } } /// Field number for the "polling_inactive_delay_msecs" field. public const int PollingInactiveDelayMsecsFieldNumber = 7; private int pollingInactiveDelayMsecs_; /// /// This field is deprecated and ignored. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int PollingInactiveDelayMsecs { get { return pollingInactiveDelayMsecs_; } set { pollingInactiveDelayMsecs_ = value; } } /// Field number for the "force_gpu_compatible" field. public const int ForceGpuCompatibleFieldNumber = 8; private bool forceGpuCompatible_; /// /// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, /// enabling this option forces all CPU tensors to be allocated with Cuda /// pinned memory. 
Normally, TensorFlow will infer which tensors should be /// allocated as the pinned memory. But in case where the inference is /// incomplete, this option can significantly speed up the cross-device memory /// copy performance as long as it fits the memory. /// Note that this option is not something that should be /// enabled by default for unknown or very large models, since all Cuda pinned /// memory is unpageable, having too much pinned memory might negatively impact /// the overall host system performance. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool ForceGpuCompatible { get { return forceGpuCompatible_; } set { forceGpuCompatible_ = value; } } /// Field number for the "experimental" field. public const int ExperimentalFieldNumber = 9; private global::Tensorflow.GPUOptions.Types.Experimental experimental_; /// /// Everything inside experimental is subject to change and is not subject /// to API stability guarantees in /// https://www.tensorflow.org/guide/version_compat. 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.GPUOptions.Types.Experimental Experimental { get { return experimental_; } set { experimental_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as GPUOptions); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(GPUOptions other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (!pbc::ProtobufEqualityComparers.BitwiseDoubleEqualityComparer.Equals(PerProcessGpuMemoryFraction, other.PerProcessGpuMemoryFraction)) return false; if (AllowGrowth != other.AllowGrowth) return false; if (AllocatorType != other.AllocatorType) return false; if (DeferredDeletionBytes != other.DeferredDeletionBytes) return false; if (VisibleDeviceList != other.VisibleDeviceList) return false; if (PollingActiveDelayUsecs != other.PollingActiveDelayUsecs) return false; if (PollingInactiveDelayMsecs != other.PollingInactiveDelayMsecs) return false; if (ForceGpuCompatible != other.ForceGpuCompatible) return false; if (!object.Equals(Experimental, other.Experimental)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; if (PerProcessGpuMemoryFraction != 0D) hash ^= pbc::ProtobufEqualityComparers.BitwiseDoubleEqualityComparer.GetHashCode(PerProcessGpuMemoryFraction); if (AllowGrowth != false) hash ^= AllowGrowth.GetHashCode(); if (AllocatorType.Length != 0) hash ^= AllocatorType.GetHashCode(); if (DeferredDeletionBytes != 0L) hash ^= DeferredDeletionBytes.GetHashCode(); if (VisibleDeviceList.Length != 0) hash ^= VisibleDeviceList.GetHashCode(); if (PollingActiveDelayUsecs != 0) hash ^= PollingActiveDelayUsecs.GetHashCode(); if (PollingInactiveDelayMsecs != 0) hash ^= PollingInactiveDelayMsecs.GetHashCode(); if 
(ForceGpuCompatible != false) hash ^= ForceGpuCompatible.GetHashCode(); if (experimental_ != null) hash ^= Experimental.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { if (PerProcessGpuMemoryFraction != 0D) { output.WriteRawTag(9); output.WriteDouble(PerProcessGpuMemoryFraction); } if (AllocatorType.Length != 0) { output.WriteRawTag(18); output.WriteString(AllocatorType); } if (DeferredDeletionBytes != 0L) { output.WriteRawTag(24); output.WriteInt64(DeferredDeletionBytes); } if (AllowGrowth != false) { output.WriteRawTag(32); output.WriteBool(AllowGrowth); } if (VisibleDeviceList.Length != 0) { output.WriteRawTag(42); output.WriteString(VisibleDeviceList); } if (PollingActiveDelayUsecs != 0) { output.WriteRawTag(48); output.WriteInt32(PollingActiveDelayUsecs); } if (PollingInactiveDelayMsecs != 0) { output.WriteRawTag(56); output.WriteInt32(PollingInactiveDelayMsecs); } if (ForceGpuCompatible != false) { output.WriteRawTag(64); output.WriteBool(ForceGpuCompatible); } if (experimental_ != null) { output.WriteRawTag(74); output.WriteMessage(Experimental); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; if (PerProcessGpuMemoryFraction != 0D) { size += 1 + 8; } if (AllowGrowth != false) { size += 1 + 1; } if (AllocatorType.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(AllocatorType); } if (DeferredDeletionBytes != 0L) { size += 1 + pb::CodedOutputStream.ComputeInt64Size(DeferredDeletionBytes); } if (VisibleDeviceList.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(VisibleDeviceList); } if 
(PollingActiveDelayUsecs != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(PollingActiveDelayUsecs); } if (PollingInactiveDelayMsecs != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(PollingInactiveDelayMsecs); } if (ForceGpuCompatible != false) { size += 1 + 1; } if (experimental_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(Experimental); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(GPUOptions other) { if (other == null) { return; } if (other.PerProcessGpuMemoryFraction != 0D) { PerProcessGpuMemoryFraction = other.PerProcessGpuMemoryFraction; } if (other.AllowGrowth != false) { AllowGrowth = other.AllowGrowth; } if (other.AllocatorType.Length != 0) { AllocatorType = other.AllocatorType; } if (other.DeferredDeletionBytes != 0L) { DeferredDeletionBytes = other.DeferredDeletionBytes; } if (other.VisibleDeviceList.Length != 0) { VisibleDeviceList = other.VisibleDeviceList; } if (other.PollingActiveDelayUsecs != 0) { PollingActiveDelayUsecs = other.PollingActiveDelayUsecs; } if (other.PollingInactiveDelayMsecs != 0) { PollingInactiveDelayMsecs = other.PollingInactiveDelayMsecs; } if (other.ForceGpuCompatible != false) { ForceGpuCompatible = other.ForceGpuCompatible; } if (other.experimental_ != null) { if (experimental_ == null) { experimental_ = new global::Tensorflow.GPUOptions.Types.Experimental(); } Experimental.MergeFrom(other.Experimental); } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 9: { PerProcessGpuMemoryFraction = input.ReadDouble(); break; } case 18: { AllocatorType = input.ReadString(); 
break; } case 24: { DeferredDeletionBytes = input.ReadInt64(); break; } case 32: { AllowGrowth = input.ReadBool(); break; } case 42: { VisibleDeviceList = input.ReadString(); break; } case 48: { PollingActiveDelayUsecs = input.ReadInt32(); break; } case 56: { PollingInactiveDelayMsecs = input.ReadInt32(); break; } case 64: { ForceGpuCompatible = input.ReadBool(); break; } case 74: { if (experimental_ == null) { experimental_ = new global::Tensorflow.GPUOptions.Types.Experimental(); } input.ReadMessage(experimental_); break; } } } } #region Nested types /// Container for nested types declared in the GPUOptions message type. [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static partial class Types { public sealed partial class Experimental : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new Experimental()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.GPUOptions.Descriptor.NestedTypes[0]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public Experimental() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public Experimental(Experimental other) : this() { virtualDevices_ = other.virtualDevices_.Clone(); useUnifiedMemory_ = other.useUnifiedMemory_; numDevToDevCopyStreams_ = other.numDevToDevCopyStreams_; collectiveRingOrder_ = other.collectiveRingOrder_; timestampedAllocator_ = other.timestampedAllocator_; kernelTrackerMaxInterval_ = other.kernelTrackerMaxInterval_; kernelTrackerMaxBytes_ = other.kernelTrackerMaxBytes_; 
kernelTrackerMaxPending_ = other.kernelTrackerMaxPending_; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public Experimental Clone() { return new Experimental(this); } /// Field number for the "virtual_devices" field. public const int VirtualDevicesFieldNumber = 1; private static readonly pb::FieldCodec _repeated_virtualDevices_codec = pb::FieldCodec.ForMessage(10, global::Tensorflow.GPUOptions.Types.Experimental.Types.VirtualDevices.Parser); private readonly pbc::RepeatedField virtualDevices_ = new pbc::RepeatedField(); /// /// The multi virtual device settings. If empty (not set), it will create /// single virtual device on each visible GPU, according to the settings /// in "visible_device_list" above. Otherwise, the number of elements in the /// list must be the same as the number of visible GPUs (after /// "visible_device_list" filtering if it is set), and the string represented /// device names (e.g. /device:GPU:<id>) will refer to the virtual /// devices and have the <id> field assigned sequentially starting from 0, /// according to the order they appear in this list and the "memory_limit" /// list inside each element. For example, /// visible_device_list = "1,0" /// virtual_devices { memory_limit: 1GB memory_limit: 2GB } /// virtual_devices {} /// will create three virtual devices as: /// /device:GPU:0 -> visible GPU 1 with 1GB memory /// /device:GPU:1 -> visible GPU 1 with 2GB memory /// /device:GPU:2 -> visible GPU 0 with all available memory /// /// NOTE: /// 1. It's invalid to set both this and "per_process_gpu_memory_fraction" /// at the same time. /// 2. Currently this setting is per-process, not per-session. Using /// different settings in different sessions within same process will /// result in undefined behavior. 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField VirtualDevices { get { return virtualDevices_; } } /// Field number for the "use_unified_memory" field. public const int UseUnifiedMemoryFieldNumber = 2; private bool useUnifiedMemory_; /// /// If true, uses CUDA unified memory for memory allocations. If /// per_process_gpu_memory_fraction option is greater than 1.0, then unified /// memory is used regardless of the value for this field. See comments for /// per_process_gpu_memory_fraction field for more details and requirements /// of the unified memory. This option is useful to oversubscribe memory if /// multiple processes are sharing a single GPU while individually using less /// than 1.0 per process memory fraction. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool UseUnifiedMemory { get { return useUnifiedMemory_; } set { useUnifiedMemory_ = value; } } /// Field number for the "num_dev_to_dev_copy_streams" field. public const int NumDevToDevCopyStreamsFieldNumber = 3; private int numDevToDevCopyStreams_; /// /// If > 1, the number of device-to-device copy streams to create /// for each GPUDevice. Default value is 0, which is automatically /// converted to 1. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int NumDevToDevCopyStreams { get { return numDevToDevCopyStreams_; } set { numDevToDevCopyStreams_ = value; } } /// Field number for the "collective_ring_order" field. public const int CollectiveRingOrderFieldNumber = 4; private string collectiveRingOrder_ = ""; /// /// If non-empty, defines a good GPU ring order on a single worker based on /// device interconnect. This assumes that all workers have the same GPU /// topology. Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4". 
/// This ring order is used by the RingReducer implementation of /// CollectiveReduce, and serves as an override to automatic ring order /// generation in OrderTaskDeviceMap() during CollectiveParam resolution. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public string CollectiveRingOrder { get { return collectiveRingOrder_; } set { collectiveRingOrder_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); } } /// Field number for the "timestamped_allocator" field. public const int TimestampedAllocatorFieldNumber = 5; private bool timestampedAllocator_; /// /// If true then extra work is done by GPUDevice and GPUBFCAllocator to /// keep track of when GPU memory is freed and when kernels actually /// complete so that we can know when a nominally free memory chunk /// is really not subject to pending use. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool TimestampedAllocator { get { return timestampedAllocator_; } set { timestampedAllocator_ = value; } } /// Field number for the "kernel_tracker_max_interval" field. public const int KernelTrackerMaxIntervalFieldNumber = 7; private int kernelTrackerMaxInterval_; /// /// Parameters for GPUKernelTracker. By default no kernel tracking is done. /// Note that timestamped_allocator is only effective if some tracking is /// specified. /// /// If kernel_tracker_max_interval = n > 0, then a tracking event /// is inserted after every n kernels without an event. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int KernelTrackerMaxInterval { get { return kernelTrackerMaxInterval_; } set { kernelTrackerMaxInterval_ = value; } } /// Field number for the "kernel_tracker_max_bytes" field. public const int KernelTrackerMaxBytesFieldNumber = 8; private int kernelTrackerMaxBytes_; /// /// If kernel_tracker_max_bytes = n > 0, then a tracking event is /// inserted after every series of kernels allocating a sum of /// memory >= n. 
If one kernel allocates b * n bytes, then one /// event will be inserted after it, but it will count as b against /// the pending limit. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int KernelTrackerMaxBytes { get { return kernelTrackerMaxBytes_; } set { kernelTrackerMaxBytes_ = value; } } /// Field number for the "kernel_tracker_max_pending" field. public const int KernelTrackerMaxPendingFieldNumber = 9; private int kernelTrackerMaxPending_; /// /// If kernel_tracker_max_pending > 0 then no more than this many /// tracking events can be outstanding at a time. An attempt to /// launch an additional kernel will stall until an event /// completes. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int KernelTrackerMaxPending { get { return kernelTrackerMaxPending_; } set { kernelTrackerMaxPending_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as Experimental); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(Experimental other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if(!virtualDevices_.Equals(other.virtualDevices_)) return false; if (UseUnifiedMemory != other.UseUnifiedMemory) return false; if (NumDevToDevCopyStreams != other.NumDevToDevCopyStreams) return false; if (CollectiveRingOrder != other.CollectiveRingOrder) return false; if (TimestampedAllocator != other.TimestampedAllocator) return false; if (KernelTrackerMaxInterval != other.KernelTrackerMaxInterval) return false; if (KernelTrackerMaxBytes != other.KernelTrackerMaxBytes) return false; if (KernelTrackerMaxPending != other.KernelTrackerMaxPending) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; hash ^= virtualDevices_.GetHashCode(); if (UseUnifiedMemory != 
false) hash ^= UseUnifiedMemory.GetHashCode(); if (NumDevToDevCopyStreams != 0) hash ^= NumDevToDevCopyStreams.GetHashCode(); if (CollectiveRingOrder.Length != 0) hash ^= CollectiveRingOrder.GetHashCode(); if (TimestampedAllocator != false) hash ^= TimestampedAllocator.GetHashCode(); if (KernelTrackerMaxInterval != 0) hash ^= KernelTrackerMaxInterval.GetHashCode(); if (KernelTrackerMaxBytes != 0) hash ^= KernelTrackerMaxBytes.GetHashCode(); if (KernelTrackerMaxPending != 0) hash ^= KernelTrackerMaxPending.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { virtualDevices_.WriteTo(output, _repeated_virtualDevices_codec); if (UseUnifiedMemory != false) { output.WriteRawTag(16); output.WriteBool(UseUnifiedMemory); } if (NumDevToDevCopyStreams != 0) { output.WriteRawTag(24); output.WriteInt32(NumDevToDevCopyStreams); } if (CollectiveRingOrder.Length != 0) { output.WriteRawTag(34); output.WriteString(CollectiveRingOrder); } if (TimestampedAllocator != false) { output.WriteRawTag(40); output.WriteBool(TimestampedAllocator); } if (KernelTrackerMaxInterval != 0) { output.WriteRawTag(56); output.WriteInt32(KernelTrackerMaxInterval); } if (KernelTrackerMaxBytes != 0) { output.WriteRawTag(64); output.WriteInt32(KernelTrackerMaxBytes); } if (KernelTrackerMaxPending != 0) { output.WriteRawTag(72); output.WriteInt32(KernelTrackerMaxPending); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; size += virtualDevices_.CalculateSize(_repeated_virtualDevices_codec); if (UseUnifiedMemory != false) { size += 1 + 1; } if (NumDevToDevCopyStreams != 0) { size += 1 + 
pb::CodedOutputStream.ComputeInt32Size(NumDevToDevCopyStreams); } if (CollectiveRingOrder.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(CollectiveRingOrder); } if (TimestampedAllocator != false) { size += 1 + 1; } if (KernelTrackerMaxInterval != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(KernelTrackerMaxInterval); } if (KernelTrackerMaxBytes != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(KernelTrackerMaxBytes); } if (KernelTrackerMaxPending != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(KernelTrackerMaxPending); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(Experimental other) { if (other == null) { return; } virtualDevices_.Add(other.virtualDevices_); if (other.UseUnifiedMemory != false) { UseUnifiedMemory = other.UseUnifiedMemory; } if (other.NumDevToDevCopyStreams != 0) { NumDevToDevCopyStreams = other.NumDevToDevCopyStreams; } if (other.CollectiveRingOrder.Length != 0) { CollectiveRingOrder = other.CollectiveRingOrder; } if (other.TimestampedAllocator != false) { TimestampedAllocator = other.TimestampedAllocator; } if (other.KernelTrackerMaxInterval != 0) { KernelTrackerMaxInterval = other.KernelTrackerMaxInterval; } if (other.KernelTrackerMaxBytes != 0) { KernelTrackerMaxBytes = other.KernelTrackerMaxBytes; } if (other.KernelTrackerMaxPending != 0) { KernelTrackerMaxPending = other.KernelTrackerMaxPending; } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: { virtualDevices_.AddEntriesFrom(input, _repeated_virtualDevices_codec); break; } case 16: { UseUnifiedMemory = 
input.ReadBool(); break; } case 24: { NumDevToDevCopyStreams = input.ReadInt32(); break; } case 34: { CollectiveRingOrder = input.ReadString(); break; } case 40: { TimestampedAllocator = input.ReadBool(); break; } case 56: { KernelTrackerMaxInterval = input.ReadInt32(); break; } case 64: { KernelTrackerMaxBytes = input.ReadInt32(); break; } case 72: { KernelTrackerMaxPending = input.ReadInt32(); break; } } } } #region Nested types /// Container for nested types declared in the Experimental message type. [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static partial class Types { /// /// Configuration for breaking down a visible GPU into multiple "virtual" /// devices. /// public sealed partial class VirtualDevices : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new VirtualDevices()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.GPUOptions.Types.Experimental.Descriptor.NestedTypes[0]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public VirtualDevices() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public VirtualDevices(VirtualDevices other) : this() { memoryLimitMb_ = other.memoryLimitMb_.Clone(); _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public VirtualDevices Clone() { return new VirtualDevices(this); } /// Field number for the "memory_limit_mb" field. 
public const int MemoryLimitMbFieldNumber = 1; private static readonly pb::FieldCodec _repeated_memoryLimitMb_codec = pb::FieldCodec.ForFloat(10); private readonly pbc::RepeatedField memoryLimitMb_ = new pbc::RepeatedField(); /// /// Per "virtual" device memory limit, in MB. The number of elements in /// the list is the number of virtual devices to create on the /// corresponding visible GPU (see "virtual_devices" below). /// If empty, it will create single virtual device taking all available /// memory from the device. /// /// For the concept of "visible" and "virtual" GPU, see the comments for /// "visible_device_list" above for more information. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField MemoryLimitMb { get { return memoryLimitMb_; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as VirtualDevices); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(VirtualDevices other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if(!memoryLimitMb_.Equals(other.memoryLimitMb_)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; hash ^= memoryLimitMb_.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { memoryLimitMb_.WriteTo(output, _repeated_memoryLimitMb_codec); if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; size += 
memoryLimitMb_.CalculateSize(_repeated_memoryLimitMb_codec); if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(VirtualDevices other) { if (other == null) { return; } memoryLimitMb_.Add(other.memoryLimitMb_); _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: case 13: { memoryLimitMb_.AddEntriesFrom(input, _repeated_memoryLimitMb_codec); break; } } } } } } #endregion } } #endregion } /// /// Options passed to the graph optimizer /// public sealed partial class OptimizerOptions : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new OptimizerOptions()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[1]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public OptimizerOptions() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public OptimizerOptions(OptimizerOptions other) : this() { doCommonSubexpressionElimination_ = other.doCommonSubexpressionElimination_; doConstantFolding_ = other.doConstantFolding_; maxFoldedConstantInBytes_ = other.maxFoldedConstantInBytes_; doFunctionInlining_ = 
other.doFunctionInlining_; optLevel_ = other.optLevel_; globalJitLevel_ = other.globalJitLevel_; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public OptimizerOptions Clone() { return new OptimizerOptions(this); } /// Field number for the "do_common_subexpression_elimination" field. public const int DoCommonSubexpressionEliminationFieldNumber = 1; private bool doCommonSubexpressionElimination_; /// /// If true, optimize the graph using common subexpression elimination. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool DoCommonSubexpressionElimination { get { return doCommonSubexpressionElimination_; } set { doCommonSubexpressionElimination_ = value; } } /// Field number for the "do_constant_folding" field. public const int DoConstantFoldingFieldNumber = 2; private bool doConstantFolding_; /// /// If true, perform constant folding optimization on the graph. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool DoConstantFolding { get { return doConstantFolding_; } set { doConstantFolding_ = value; } } /// Field number for the "max_folded_constant_in_bytes" field. public const int MaxFoldedConstantInBytesFieldNumber = 6; private long maxFoldedConstantInBytes_; /// /// Constant folding optimization replaces tensors whose values can be /// predetermined, with constant nodes. To avoid inserting too large constants, /// the size of each constant created can be limited. If this value is zero, a /// default limit of 10 MiB will be applied. If constant folding optimization /// is disabled, this value is ignored. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public long MaxFoldedConstantInBytes { get { return maxFoldedConstantInBytes_; } set { maxFoldedConstantInBytes_ = value; } } /// Field number for the "do_function_inlining" field. 
public const int DoFunctionInliningFieldNumber = 4; private bool doFunctionInlining_; /// /// If true, perform function inlining on the graph. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool DoFunctionInlining { get { return doFunctionInlining_; } set { doFunctionInlining_ = value; } } /// Field number for the "opt_level" field. public const int OptLevelFieldNumber = 3; private global::Tensorflow.OptimizerOptions.Types.Level optLevel_ = 0; /// /// Overall optimization level. The actual optimizations applied will be the /// logical OR of the flags that this level implies and any flags already set. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.OptimizerOptions.Types.Level OptLevel { get { return optLevel_; } set { optLevel_ = value; } } /// Field number for the "global_jit_level" field. public const int GlobalJitLevelFieldNumber = 5; private global::Tensorflow.OptimizerOptions.Types.GlobalJitLevel globalJitLevel_ = 0; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.OptimizerOptions.Types.GlobalJitLevel GlobalJitLevel { get { return globalJitLevel_; } set { globalJitLevel_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as OptimizerOptions); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(OptimizerOptions other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (DoCommonSubexpressionElimination != other.DoCommonSubexpressionElimination) return false; if (DoConstantFolding != other.DoConstantFolding) return false; if (MaxFoldedConstantInBytes != other.MaxFoldedConstantInBytes) return false; if (DoFunctionInlining != other.DoFunctionInlining) return false; if (OptLevel != other.OptLevel) return false; if (GlobalJitLevel != other.GlobalJitLevel) return false; return Equals(_unknownFields, 
other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; if (DoCommonSubexpressionElimination != false) hash ^= DoCommonSubexpressionElimination.GetHashCode(); if (DoConstantFolding != false) hash ^= DoConstantFolding.GetHashCode(); if (MaxFoldedConstantInBytes != 0L) hash ^= MaxFoldedConstantInBytes.GetHashCode(); if (DoFunctionInlining != false) hash ^= DoFunctionInlining.GetHashCode(); if (OptLevel != 0) hash ^= OptLevel.GetHashCode(); if (GlobalJitLevel != 0) hash ^= GlobalJitLevel.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { if (DoCommonSubexpressionElimination != false) { output.WriteRawTag(8); output.WriteBool(DoCommonSubexpressionElimination); } if (DoConstantFolding != false) { output.WriteRawTag(16); output.WriteBool(DoConstantFolding); } if (OptLevel != 0) { output.WriteRawTag(24); output.WriteEnum((int) OptLevel); } if (DoFunctionInlining != false) { output.WriteRawTag(32); output.WriteBool(DoFunctionInlining); } if (GlobalJitLevel != 0) { output.WriteRawTag(40); output.WriteEnum((int) GlobalJitLevel); } if (MaxFoldedConstantInBytes != 0L) { output.WriteRawTag(48); output.WriteInt64(MaxFoldedConstantInBytes); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; if (DoCommonSubexpressionElimination != false) { size += 1 + 1; } if (DoConstantFolding != false) { size += 1 + 1; } if (MaxFoldedConstantInBytes != 0L) { size += 1 + pb::CodedOutputStream.ComputeInt64Size(MaxFoldedConstantInBytes); } if (DoFunctionInlining != false) { size += 1 + 1; } if (OptLevel != 
0) { size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) OptLevel); } if (GlobalJitLevel != 0) { size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) GlobalJitLevel); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(OptimizerOptions other) { if (other == null) { return; } if (other.DoCommonSubexpressionElimination != false) { DoCommonSubexpressionElimination = other.DoCommonSubexpressionElimination; } if (other.DoConstantFolding != false) { DoConstantFolding = other.DoConstantFolding; } if (other.MaxFoldedConstantInBytes != 0L) { MaxFoldedConstantInBytes = other.MaxFoldedConstantInBytes; } if (other.DoFunctionInlining != false) { DoFunctionInlining = other.DoFunctionInlining; } if (other.OptLevel != 0) { OptLevel = other.OptLevel; } if (other.GlobalJitLevel != 0) { GlobalJitLevel = other.GlobalJitLevel; } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 8: { DoCommonSubexpressionElimination = input.ReadBool(); break; } case 16: { DoConstantFolding = input.ReadBool(); break; } case 24: { optLevel_ = (global::Tensorflow.OptimizerOptions.Types.Level) input.ReadEnum(); break; } case 32: { DoFunctionInlining = input.ReadBool(); break; } case 40: { globalJitLevel_ = (global::Tensorflow.OptimizerOptions.Types.GlobalJitLevel) input.ReadEnum(); break; } case 48: { MaxFoldedConstantInBytes = input.ReadInt64(); break; } } } } #region Nested types /// Container for nested types declared in the OptimizerOptions message type. 
[global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static partial class Types { /// /// Optimization level /// public enum Level { /// /// L1 is the default level. /// Optimization performed at L1 : /// 1. Common subexpression elimination /// 2. Constant folding /// [pbr::OriginalName("L1")] L1 = 0, /// /// No optimizations /// [pbr::OriginalName("L0")] L0 = -1, } /// /// Control the use of the compiler/jit. Experimental. /// public enum GlobalJitLevel { /// /// Default setting ("off" now, but later expected to be "on") /// [pbr::OriginalName("DEFAULT")] Default = 0, [pbr::OriginalName("OFF")] Off = -1, /// /// The following settings turn on compilation, with higher values being /// more aggressive. Higher values may reduce opportunities for parallelism /// and may use more memory. (At present, there is no distinction, but this /// is expected to change.) /// [pbr::OriginalName("ON_1")] On1 = 1, [pbr::OriginalName("ON_2")] On2 = 2, } } #endregion } public sealed partial class GraphOptions : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new GraphOptions()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[2]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public GraphOptions() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public GraphOptions(GraphOptions other) : this() { enableRecvScheduling_ = other.enableRecvScheduling_; optimizerOptions_ = other.optimizerOptions_ != null ? 
other.optimizerOptions_.Clone() : null; buildCostModel_ = other.buildCostModel_; buildCostModelAfter_ = other.buildCostModelAfter_; inferShapes_ = other.inferShapes_; placePrunedGraph_ = other.placePrunedGraph_; enableBfloat16Sendrecv_ = other.enableBfloat16Sendrecv_; timelineStep_ = other.timelineStep_; rewriteOptions_ = other.rewriteOptions_ != null ? other.rewriteOptions_.Clone() : null; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public GraphOptions Clone() { return new GraphOptions(this); } /// Field number for the "enable_recv_scheduling" field. public const int EnableRecvSchedulingFieldNumber = 2; private bool enableRecvScheduling_; /// /// If true, use control flow to schedule the activation of Recv nodes. /// (Currently ignored.) /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool EnableRecvScheduling { get { return enableRecvScheduling_; } set { enableRecvScheduling_ = value; } } /// Field number for the "optimizer_options" field. public const int OptimizerOptionsFieldNumber = 3; private global::Tensorflow.OptimizerOptions optimizerOptions_; /// /// Options controlling how graph is optimized. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.OptimizerOptions OptimizerOptions { get { return optimizerOptions_; } set { optimizerOptions_ = value; } } /// Field number for the "build_cost_model" field. public const int BuildCostModelFieldNumber = 4; private long buildCostModel_; /// /// The number of steps to run before returning a cost model detailing /// the memory usage and performance of each node of the graph. 0 means /// no cost model. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public long BuildCostModel { get { return buildCostModel_; } set { buildCostModel_ = value; } } /// Field number for the "build_cost_model_after" field. 
public const int BuildCostModelAfterFieldNumber = 9; private long buildCostModelAfter_; /// /// The number of steps to skip before collecting statistics for the /// cost model. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public long BuildCostModelAfter { get { return buildCostModelAfter_; } set { buildCostModelAfter_ = value; } } /// Field number for the "infer_shapes" field. public const int InferShapesFieldNumber = 5; private bool inferShapes_; /// /// Annotate each Node with Op output shape data, to the extent it can /// be statically inferred. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool InferShapes { get { return inferShapes_; } set { inferShapes_ = value; } } /// Field number for the "place_pruned_graph" field. public const int PlacePrunedGraphFieldNumber = 6; private bool placePrunedGraph_; /// /// Only place the subgraphs that are run, rather than the entire graph. /// /// This is useful for interactive graph building, where one might /// produce graphs that cannot be placed during the debugging /// process. In particular, it allows the client to continue work in /// a session after adding a node to a graph whose placement /// constraints are unsatisfiable. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool PlacePrunedGraph { get { return placePrunedGraph_; } set { placePrunedGraph_ = value; } } /// Field number for the "enable_bfloat16_sendrecv" field. public const int EnableBfloat16SendrecvFieldNumber = 7; private bool enableBfloat16Sendrecv_; /// /// If true, transfer float values between processes as bfloat16. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool EnableBfloat16Sendrecv { get { return enableBfloat16Sendrecv_; } set { enableBfloat16Sendrecv_ = value; } } /// Field number for the "timeline_step" field. public const int TimelineStepFieldNumber = 8; private int timelineStep_; /// /// If > 0, record a timeline every this many steps. 
/// EXPERIMENTAL: This currently has no effect in MasterSession. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int TimelineStep { get { return timelineStep_; } set { timelineStep_ = value; } } /// Field number for the "rewrite_options" field. public const int RewriteOptionsFieldNumber = 10; private global::Tensorflow.RewriterConfig rewriteOptions_; /// /// Options that control the type and amount of graph rewriting. /// Not currently configurable via the public Python API (i.e. there is no API /// stability guarantee if you import RewriterConfig explicitly). /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.RewriterConfig RewriteOptions { get { return rewriteOptions_; } set { rewriteOptions_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as GraphOptions); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(GraphOptions other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (EnableRecvScheduling != other.EnableRecvScheduling) return false; if (!object.Equals(OptimizerOptions, other.OptimizerOptions)) return false; if (BuildCostModel != other.BuildCostModel) return false; if (BuildCostModelAfter != other.BuildCostModelAfter) return false; if (InferShapes != other.InferShapes) return false; if (PlacePrunedGraph != other.PlacePrunedGraph) return false; if (EnableBfloat16Sendrecv != other.EnableBfloat16Sendrecv) return false; if (TimelineStep != other.TimelineStep) return false; if (!object.Equals(RewriteOptions, other.RewriteOptions)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; if (EnableRecvScheduling != false) hash ^= EnableRecvScheduling.GetHashCode(); if (optimizerOptions_ != null) hash ^= 
OptimizerOptions.GetHashCode(); if (BuildCostModel != 0L) hash ^= BuildCostModel.GetHashCode(); if (BuildCostModelAfter != 0L) hash ^= BuildCostModelAfter.GetHashCode(); if (InferShapes != false) hash ^= InferShapes.GetHashCode(); if (PlacePrunedGraph != false) hash ^= PlacePrunedGraph.GetHashCode(); if (EnableBfloat16Sendrecv != false) hash ^= EnableBfloat16Sendrecv.GetHashCode(); if (TimelineStep != 0) hash ^= TimelineStep.GetHashCode(); if (rewriteOptions_ != null) hash ^= RewriteOptions.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { if (EnableRecvScheduling != false) { output.WriteRawTag(16); output.WriteBool(EnableRecvScheduling); } if (optimizerOptions_ != null) { output.WriteRawTag(26); output.WriteMessage(OptimizerOptions); } if (BuildCostModel != 0L) { output.WriteRawTag(32); output.WriteInt64(BuildCostModel); } if (InferShapes != false) { output.WriteRawTag(40); output.WriteBool(InferShapes); } if (PlacePrunedGraph != false) { output.WriteRawTag(48); output.WriteBool(PlacePrunedGraph); } if (EnableBfloat16Sendrecv != false) { output.WriteRawTag(56); output.WriteBool(EnableBfloat16Sendrecv); } if (TimelineStep != 0) { output.WriteRawTag(64); output.WriteInt32(TimelineStep); } if (BuildCostModelAfter != 0L) { output.WriteRawTag(72); output.WriteInt64(BuildCostModelAfter); } if (rewriteOptions_ != null) { output.WriteRawTag(82); output.WriteMessage(RewriteOptions); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; if (EnableRecvScheduling != false) { size += 1 + 1; } if (optimizerOptions_ != null) { size += 1 + 
pb::CodedOutputStream.ComputeMessageSize(OptimizerOptions); } if (BuildCostModel != 0L) { size += 1 + pb::CodedOutputStream.ComputeInt64Size(BuildCostModel); } if (BuildCostModelAfter != 0L) { size += 1 + pb::CodedOutputStream.ComputeInt64Size(BuildCostModelAfter); } if (InferShapes != false) { size += 1 + 1; } if (PlacePrunedGraph != false) { size += 1 + 1; } if (EnableBfloat16Sendrecv != false) { size += 1 + 1; } if (TimelineStep != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(TimelineStep); } if (rewriteOptions_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(RewriteOptions); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(GraphOptions other) { if (other == null) { return; } if (other.EnableRecvScheduling != false) { EnableRecvScheduling = other.EnableRecvScheduling; } if (other.optimizerOptions_ != null) { if (optimizerOptions_ == null) { optimizerOptions_ = new global::Tensorflow.OptimizerOptions(); } OptimizerOptions.MergeFrom(other.OptimizerOptions); } if (other.BuildCostModel != 0L) { BuildCostModel = other.BuildCostModel; } if (other.BuildCostModelAfter != 0L) { BuildCostModelAfter = other.BuildCostModelAfter; } if (other.InferShapes != false) { InferShapes = other.InferShapes; } if (other.PlacePrunedGraph != false) { PlacePrunedGraph = other.PlacePrunedGraph; } if (other.EnableBfloat16Sendrecv != false) { EnableBfloat16Sendrecv = other.EnableBfloat16Sendrecv; } if (other.TimelineStep != 0) { TimelineStep = other.TimelineStep; } if (other.rewriteOptions_ != null) { if (rewriteOptions_ == null) { rewriteOptions_ = new global::Tensorflow.RewriterConfig(); } RewriteOptions.MergeFrom(other.RewriteOptions); } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; 
// NOTE(review): auto-generated protobuf code (DO NOT EDIT header) — comments
// only; code tokens unchanged. Regenerate from config.proto to change behavior.
// --- remainder of GraphOptions.MergeFrom(CodedInputStream): dispatch on the
// wire tag read from the stream; unrecognized tags go to _unknownFields ---
while ((tag = input.ReadTag()) != 0) {
  switch(tag) {
    default:
      _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
      break;
    case 16: { EnableRecvScheduling = input.ReadBool(); break; }
    case 26: {
      if (optimizerOptions_ == null) { optimizerOptions_ = new global::Tensorflow.OptimizerOptions(); }
      input.ReadMessage(optimizerOptions_);
      break;
    }
    case 32: { BuildCostModel = input.ReadInt64(); break; }
    case 40: { InferShapes = input.ReadBool(); break; }
    case 48: { PlacePrunedGraph = input.ReadBool(); break; }
    case 56: { EnableBfloat16Sendrecv = input.ReadBool(); break; }
    case 64: { TimelineStep = input.ReadInt32(); break; }
    case 72: { BuildCostModelAfter = input.ReadInt64(); break; }
    case 82: {
      if (rewriteOptions_ == null) { rewriteOptions_ = new global::Tensorflow.RewriterConfig(); }
      input.ReadMessage(rewriteOptions_);
      break;
    }
  }
}
}
}  // end of GraphOptions

/// Generated message for tensorflow.ThreadPoolOptionProto: configuration of a
/// single session thread pool (num_threads, global_name).
/// NOTE(review): generic arguments (IMessage&lt;T&gt;, MessageParser&lt;T&gt;) appear
/// stripped in this flattened copy — TODO confirm against the generated Config.cs.
public sealed partial class ThreadPoolOptionProto : pb::IMessage {
  private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new ThreadPoolOptionProto());
  private pb::UnknownFieldSet _unknownFields;
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public static pb::MessageParser Parser { get { return _parser; } }
  /// Descriptor for this message; index 3 in config.proto's top-level message list.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public static pbr::MessageDescriptor Descriptor {
    get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[3]; }
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public ThreadPoolOptionProto() { OnConstruction(); }
  partial void OnConstruction();
  /// Copy constructor (deep-copies the unknown-field set).
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public ThreadPoolOptionProto(ThreadPoolOptionProto other) : this() {
    numThreads_ = other.numThreads_;
    globalName_ = other.globalName_;
    _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public ThreadPoolOptionProto Clone() { return new ThreadPoolOptionProto(this); }
  /// Field number for the "num_threads" field.
  public const int NumThreadsFieldNumber = 1;
  private int numThreads_;
  /// The number of threads in the pool.
  /// 0 means the system picks a value based on where this option proto is used
  /// (see the declaration of the specific field for more info).
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public int NumThreads {
    get { return numThreads_; }
    set { numThreads_ = value; }
  }
  /// Field number for the "global_name" field.
  public const int GlobalNameFieldNumber = 2;
  private string globalName_ = "";
  /// The global name of the threadpool.
  /// If empty, then the threadpool is made and used according to the scope it's
  /// in - e.g., for a session threadpool, it is used by that session only.
  /// If non-empty, then:
  /// - a global threadpool associated with this name is looked
  /// up or created. This allows, for example, sharing one threadpool across
  /// many sessions (e.g., like the default behavior, if
  /// inter_op_parallelism_threads is not configured), but still partitioning
  /// into a large and small pool.
  /// - if the threadpool for this global_name already exists, then it is an
  /// error if the existing pool was created using a different num_threads
  /// value as is specified on this call.
  /// - threadpools created this way are never garbage collected.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public string GlobalName {
    get { return globalName_; }
    set { globalName_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); }  // rejects null
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public override bool Equals(object other) { return Equals(other as ThreadPoolOptionProto); }
  /// Value equality over both fields and the unknown-field set.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public bool Equals(ThreadPoolOptionProto other) {
    if (ReferenceEquals(other, null)) { return false; }
    if (ReferenceEquals(other, this)) { return true; }
    if (NumThreads != other.NumThreads) return false;
    if (GlobalName != other.GlobalName) return false;
    return Equals(_unknownFields, other._unknownFields);
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public override int GetHashCode() {
    int hash = 1;
    if (NumThreads != 0) hash ^= NumThreads.GetHashCode();
    if (GlobalName.Length != 0) hash ^= GlobalName.GetHashCode();
    if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); }
    return hash;
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); }
  /// Wire format: tag 8 = num_threads (varint), tag 18 = global_name (length-delimited).
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public void WriteTo(pb::CodedOutputStream output) {
    if (NumThreads != 0) { output.WriteRawTag(8); output.WriteInt32(NumThreads); }
    if (GlobalName.Length != 0) { output.WriteRawTag(18); output.WriteString(GlobalName); }
    if (_unknownFields != null) { _unknownFields.WriteTo(output); }
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public int CalculateSize() {
    int size = 0;
    if (NumThreads != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(NumThreads); }
    if (GlobalName.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(GlobalName); }
    if (_unknownFields != null) { size += _unknownFields.CalculateSize(); }
    return size;
  }
  /// Merge from another message: non-default fields of `other` win.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public void MergeFrom(ThreadPoolOptionProto other) {
    if (other == null) { return; }
    if (other.NumThreads != 0) { NumThreads = other.NumThreads; }
    if (other.GlobalName.Length != 0) { GlobalName = other.GlobalName; }
    _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
  }
  /// Parse from the wire; unrecognized tags are preserved in _unknownFields.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public void MergeFrom(pb::CodedInputStream input) {
    uint tag;
    while ((tag = input.ReadTag()) != 0) {
      switch(tag) {
        default:
          _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
          break;
        case 8: { NumThreads = input.ReadInt32(); break; }
        case 18: { GlobalName = input.ReadString(); break; }
      }
    }
  }
}

/// Generated message for tensorflow.RPCOptions: options for RPC-based
/// client/master communication. The class body continues past this span.
public sealed partial class RPCOptions : pb::IMessage {
  private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new RPCOptions());
  private pb::UnknownFieldSet _unknownFields;
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public static pb::MessageParser Parser { get { return _parser; } }
  /// Descriptor for this message; index 4 in config.proto's top-level message list.
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public static pbr::MessageDescriptor Descriptor {
    get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[4]; }
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public RPCOptions() { OnConstruction(); }
  partial void OnConstruction();
  /// Copy constructor (deep-copies the unknown-field set).
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public RPCOptions(RPCOptions other) : this() {
    useRpcForInprocessMaster_ = other.useRpcForInprocessMaster_;
    compressionAlgorithm_ = other.compressionAlgorithm_;
    compressionLevel_ = other.compressionLevel_;
    _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
  }
  [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
  public RPCOptions Clone() { return new RPCOptions(this); }
  /// Field number for the "use_rpc_for_inprocess_master" field.
// NOTE(review): auto-generated protobuf code (DO NOT EDIT header) — comments
// only; code tokens unchanged. This span is the interior and tail of the
// RPCOptions message class, whose declaration starts above it.
public const int UseRpcForInprocessMasterFieldNumber = 1;
private bool useRpcForInprocessMaster_;
/// If true, always use RPC to contact the session target.
/// If false (the default option), TensorFlow may use an optimized
/// transport for client-master communication that avoids the RPC
/// stack. This option is primarily used for testing the RPC stack.
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public bool UseRpcForInprocessMaster {
  get { return useRpcForInprocessMaster_; }
  set { useRpcForInprocessMaster_ = value; }
}
/// Field number for the "compression_algorithm" field.
public const int CompressionAlgorithmFieldNumber = 2;
private string compressionAlgorithm_ = "";
/// The compression algorithm to be used. One of "deflate", "gzip".
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public string CompressionAlgorithm {
  get { return compressionAlgorithm_; }
  set { compressionAlgorithm_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); }  // rejects null
}
/// Field number for the "compression_level" field.
public const int CompressionLevelFieldNumber = 3;
private int compressionLevel_;
/// If compression_algorithm is set, the compression level to be used.
/// From 0 (no compression), up to 3.
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public int CompressionLevel {
  get { return compressionLevel_; }
  set { compressionLevel_ = value; }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) { return Equals(other as RPCOptions); }
/// Value equality over all three fields and the unknown-field set.
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public bool Equals(RPCOptions other) {
  if (ReferenceEquals(other, null)) { return false; }
  if (ReferenceEquals(other, this)) { return true; }
  if (UseRpcForInprocessMaster != other.UseRpcForInprocessMaster) return false;
  if (CompressionAlgorithm != other.CompressionAlgorithm) return false;
  if (CompressionLevel != other.CompressionLevel) return false;
  return Equals(_unknownFields, other._unknownFields);
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override int GetHashCode() {
  int hash = 1;
  if (UseRpcForInprocessMaster != false) hash ^= UseRpcForInprocessMaster.GetHashCode();
  if (CompressionAlgorithm.Length != 0) hash ^= CompressionAlgorithm.GetHashCode();
  if (CompressionLevel != 0) hash ^= CompressionLevel.GetHashCode();
  if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); }
  return hash;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); }
/// Wire format: tag 8 = use_rpc_for_inprocess_master (varint bool),
/// tag 18 = compression_algorithm (length-delimited), tag 24 = compression_level (varint).
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public void WriteTo(pb::CodedOutputStream output) {
  if (UseRpcForInprocessMaster != false) { output.WriteRawTag(8); output.WriteBool(UseRpcForInprocessMaster); }
  if (CompressionAlgorithm.Length != 0) { output.WriteRawTag(18); output.WriteString(CompressionAlgorithm); }
  if (CompressionLevel != 0) { output.WriteRawTag(24); output.WriteInt32(CompressionLevel); }
  if (_unknownFields != null) { _unknownFields.WriteTo(output); }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public int CalculateSize() {
  int size = 0;
  if (UseRpcForInprocessMaster != false) { size += 1 + 1; }
  if (CompressionAlgorithm.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(CompressionAlgorithm); }
  if (CompressionLevel != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(CompressionLevel); }
  if (_unknownFields != null) { size += _unknownFields.CalculateSize(); }
  return size;
}
/// Merge from another message: non-default fields of `other` win.
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public void MergeFrom(RPCOptions other) {
  if (other == null) { return; }
  if (other.UseRpcForInprocessMaster != false) { UseRpcForInprocessMaster = other.UseRpcForInprocessMaster; }
  if (other.CompressionAlgorithm.Length != 0) { CompressionAlgorithm = other.CompressionAlgorithm; }
  if (other.CompressionLevel != 0) { CompressionLevel = other.CompressionLevel; }
  _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
}
/// Parse from the wire; unrecognized tags are preserved in _unknownFields.
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public void MergeFrom(pb::CodedInputStream input) {
  uint tag;
  while ((tag = input.ReadTag()) != 0) {
    switch(tag) {
      default:
        _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
        break;
      case 8: { UseRpcForInprocessMaster = input.ReadBool(); break; }
      case 18: { CompressionAlgorithm = input.ReadString(); break; }
      case 24: { CompressionLevel = input.ReadInt32(); break; }
    }
  }
}
}  // end of RPCOptions

/// Session configuration parameters.
/// The system picks appropriate values for fields that are not set.
/// public sealed partial class ConfigProto : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new ConfigProto()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[5]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public ConfigProto() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public ConfigProto(ConfigProto other) : this() { deviceCount_ = other.deviceCount_.Clone(); intraOpParallelismThreads_ = other.intraOpParallelismThreads_; interOpParallelismThreads_ = other.interOpParallelismThreads_; usePerSessionThreads_ = other.usePerSessionThreads_; sessionInterOpThreadPool_ = other.sessionInterOpThreadPool_.Clone(); placementPeriod_ = other.placementPeriod_; deviceFilters_ = other.deviceFilters_.Clone(); gpuOptions_ = other.gpuOptions_ != null ? other.gpuOptions_.Clone() : null; allowSoftPlacement_ = other.allowSoftPlacement_; logDevicePlacement_ = other.logDevicePlacement_; graphOptions_ = other.graphOptions_ != null ? other.graphOptions_.Clone() : null; operationTimeoutInMs_ = other.operationTimeoutInMs_; rpcOptions_ = other.rpcOptions_ != null ? other.rpcOptions_.Clone() : null; clusterDef_ = other.clusterDef_ != null ? other.clusterDef_.Clone() : null; isolateSessionState_ = other.isolateSessionState_; experimental_ = other.experimental_ != null ? 
other.experimental_.Clone() : null; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public ConfigProto Clone() { return new ConfigProto(this); } /// Field number for the "device_count" field. public const int DeviceCountFieldNumber = 1; private static readonly pbc::MapField.Codec _map_deviceCount_codec = new pbc::MapField.Codec(pb::FieldCodec.ForString(10), pb::FieldCodec.ForInt32(16), 10); private readonly pbc::MapField deviceCount_ = new pbc::MapField(); /// /// Map from device type name (e.g., "CPU" or "GPU" ) to maximum /// number of devices of that type to use. If a particular device /// type is not found in the map, the system picks an appropriate /// number. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::MapField DeviceCount { get { return deviceCount_; } } /// Field number for the "intra_op_parallelism_threads" field. public const int IntraOpParallelismThreadsFieldNumber = 2; private int intraOpParallelismThreads_; /// /// The execution of an individual op (for some op types) can be /// parallelized on a pool of intra_op_parallelism_threads. /// 0 means the system picks an appropriate number. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int IntraOpParallelismThreads { get { return intraOpParallelismThreads_; } set { intraOpParallelismThreads_ = value; } } /// Field number for the "inter_op_parallelism_threads" field. public const int InterOpParallelismThreadsFieldNumber = 5; private int interOpParallelismThreads_; /// /// Nodes that perform blocking operations are enqueued on a pool of /// inter_op_parallelism_threads available in each process. /// /// 0 means the system picks an appropriate number. /// Negative means all operations are performed in caller's thread. 
/// /// Note that the first Session created in the process sets the /// number of threads for all future sessions unless use_per_session_threads is /// true or session_inter_op_thread_pool is configured. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int InterOpParallelismThreads { get { return interOpParallelismThreads_; } set { interOpParallelismThreads_ = value; } } /// Field number for the "use_per_session_threads" field. public const int UsePerSessionThreadsFieldNumber = 9; private bool usePerSessionThreads_; /// /// If true, use a new set of threads for this session rather than the global /// pool of threads. Only supported by direct sessions. /// /// If false, use the global threads created by the first session, or the /// per-session thread pools configured by session_inter_op_thread_pool. /// /// This option is deprecated. The same effect can be achieved by setting /// session_inter_op_thread_pool to have one element, whose num_threads equals /// inter_op_parallelism_threads. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool UsePerSessionThreads { get { return usePerSessionThreads_; } set { usePerSessionThreads_ = value; } } /// Field number for the "session_inter_op_thread_pool" field. public const int SessionInterOpThreadPoolFieldNumber = 12; private static readonly pb::FieldCodec _repeated_sessionInterOpThreadPool_codec = pb::FieldCodec.ForMessage(98, global::Tensorflow.ThreadPoolOptionProto.Parser); private readonly pbc::RepeatedField sessionInterOpThreadPool_ = new pbc::RepeatedField(); /// /// This option is experimental - it may be replaced with a different mechanism /// in the future. /// /// Configures session thread pools. If this is configured, then RunOptions for /// a Run call can select the thread pool to use. 
/// /// The intended use is for when some session invocations need to run in a /// background pool limited to a small number of threads: /// - For example, a session may be configured to have one large pool (for /// regular compute) and one small pool (for periodic, low priority work); /// using the small pool is currently the mechanism for limiting the inter-op /// parallelism of the low priority work. Note that it does not limit the /// parallelism of work spawned by a single op kernel implementation. /// - Using this setting is normally not needed in training, but may help some /// serving use cases. /// - It is also generally recommended to set the global_name field of this /// proto, to avoid creating multiple large pools. It is typically better to /// run the non-low-priority work, even across sessions, in a single large /// pool. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField SessionInterOpThreadPool { get { return sessionInterOpThreadPool_; } } /// Field number for the "placement_period" field. public const int PlacementPeriodFieldNumber = 3; private int placementPeriod_; /// /// Assignment of Nodes to Devices is recomputed every placement_period /// steps until the system warms up (at which point the recomputation /// typically slows down automatically). /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int PlacementPeriod { get { return placementPeriod_; } set { placementPeriod_ = value; } } /// Field number for the "device_filters" field. public const int DeviceFiltersFieldNumber = 4; private static readonly pb::FieldCodec _repeated_deviceFilters_codec = pb::FieldCodec.ForString(34); private readonly pbc::RepeatedField deviceFilters_ = new pbc::RepeatedField(); /// /// When any filters are present sessions will ignore all devices which do not /// match the filters. Each filter can be partially specified, e.g. "/job:ps" /// "/job:worker/replica:3", etc. 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField DeviceFilters { get { return deviceFilters_; } } /// Field number for the "gpu_options" field. public const int GpuOptionsFieldNumber = 6; private global::Tensorflow.GPUOptions gpuOptions_; /// /// Options that apply to all GPUs. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.GPUOptions GpuOptions { get { return gpuOptions_; } set { gpuOptions_ = value; } } /// Field number for the "allow_soft_placement" field. public const int AllowSoftPlacementFieldNumber = 7; private bool allowSoftPlacement_; /// /// Whether soft placement is allowed. If allow_soft_placement is true, /// an op will be placed on CPU if /// 1. there's no GPU implementation for the OP /// or /// 2. no GPU devices are known or registered /// or /// 3. need to co-locate with reftype input(s) which are from CPU. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool AllowSoftPlacement { get { return allowSoftPlacement_; } set { allowSoftPlacement_ = value; } } /// Field number for the "log_device_placement" field. public const int LogDevicePlacementFieldNumber = 8; private bool logDevicePlacement_; /// /// Whether device placements should be logged. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool LogDevicePlacement { get { return logDevicePlacement_; } set { logDevicePlacement_ = value; } } /// Field number for the "graph_options" field. public const int GraphOptionsFieldNumber = 10; private global::Tensorflow.GraphOptions graphOptions_; /// /// Options that apply to all graphs. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.GraphOptions GraphOptions { get { return graphOptions_; } set { graphOptions_ = value; } } /// Field number for the "operation_timeout_in_ms" field. 
public const int OperationTimeoutInMsFieldNumber = 11; private long operationTimeoutInMs_; /// /// Global timeout for all blocking operations in this session. If non-zero, /// and not overridden on a per-operation basis, this value will be used as the /// deadline for all blocking operations. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public long OperationTimeoutInMs { get { return operationTimeoutInMs_; } set { operationTimeoutInMs_ = value; } } /// Field number for the "rpc_options" field. public const int RpcOptionsFieldNumber = 13; private global::Tensorflow.RPCOptions rpcOptions_; /// /// Options that apply when this session uses the distributed runtime. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.RPCOptions RpcOptions { get { return rpcOptions_; } set { rpcOptions_ = value; } } /// Field number for the "cluster_def" field. public const int ClusterDefFieldNumber = 14; private global::Tensorflow.ClusterDef clusterDef_; /// /// Optional list of all workers to use in this session. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.ClusterDef ClusterDef { get { return clusterDef_; } set { clusterDef_ = value; } } /// Field number for the "isolate_session_state" field. public const int IsolateSessionStateFieldNumber = 15; private bool isolateSessionState_; /// /// If true, any resources such as Variables used in the session will not be /// shared with other sessions. However, when clusterspec propagation is /// enabled, this field is ignored and sessions are always isolated. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool IsolateSessionState { get { return isolateSessionState_; } set { isolateSessionState_ = value; } } /// Field number for the "experimental" field. 
public const int ExperimentalFieldNumber = 16; private global::Tensorflow.ConfigProto.Types.Experimental experimental_; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.ConfigProto.Types.Experimental Experimental { get { return experimental_; } set { experimental_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as ConfigProto); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(ConfigProto other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (!DeviceCount.Equals(other.DeviceCount)) return false; if (IntraOpParallelismThreads != other.IntraOpParallelismThreads) return false; if (InterOpParallelismThreads != other.InterOpParallelismThreads) return false; if (UsePerSessionThreads != other.UsePerSessionThreads) return false; if(!sessionInterOpThreadPool_.Equals(other.sessionInterOpThreadPool_)) return false; if (PlacementPeriod != other.PlacementPeriod) return false; if(!deviceFilters_.Equals(other.deviceFilters_)) return false; if (!object.Equals(GpuOptions, other.GpuOptions)) return false; if (AllowSoftPlacement != other.AllowSoftPlacement) return false; if (LogDevicePlacement != other.LogDevicePlacement) return false; if (!object.Equals(GraphOptions, other.GraphOptions)) return false; if (OperationTimeoutInMs != other.OperationTimeoutInMs) return false; if (!object.Equals(RpcOptions, other.RpcOptions)) return false; if (!object.Equals(ClusterDef, other.ClusterDef)) return false; if (IsolateSessionState != other.IsolateSessionState) return false; if (!object.Equals(Experimental, other.Experimental)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; hash ^= DeviceCount.GetHashCode(); if (IntraOpParallelismThreads != 0) hash ^= 
IntraOpParallelismThreads.GetHashCode(); if (InterOpParallelismThreads != 0) hash ^= InterOpParallelismThreads.GetHashCode(); if (UsePerSessionThreads != false) hash ^= UsePerSessionThreads.GetHashCode(); hash ^= sessionInterOpThreadPool_.GetHashCode(); if (PlacementPeriod != 0) hash ^= PlacementPeriod.GetHashCode(); hash ^= deviceFilters_.GetHashCode(); if (gpuOptions_ != null) hash ^= GpuOptions.GetHashCode(); if (AllowSoftPlacement != false) hash ^= AllowSoftPlacement.GetHashCode(); if (LogDevicePlacement != false) hash ^= LogDevicePlacement.GetHashCode(); if (graphOptions_ != null) hash ^= GraphOptions.GetHashCode(); if (OperationTimeoutInMs != 0L) hash ^= OperationTimeoutInMs.GetHashCode(); if (rpcOptions_ != null) hash ^= RpcOptions.GetHashCode(); if (clusterDef_ != null) hash ^= ClusterDef.GetHashCode(); if (IsolateSessionState != false) hash ^= IsolateSessionState.GetHashCode(); if (experimental_ != null) hash ^= Experimental.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { deviceCount_.WriteTo(output, _map_deviceCount_codec); if (IntraOpParallelismThreads != 0) { output.WriteRawTag(16); output.WriteInt32(IntraOpParallelismThreads); } if (PlacementPeriod != 0) { output.WriteRawTag(24); output.WriteInt32(PlacementPeriod); } deviceFilters_.WriteTo(output, _repeated_deviceFilters_codec); if (InterOpParallelismThreads != 0) { output.WriteRawTag(40); output.WriteInt32(InterOpParallelismThreads); } if (gpuOptions_ != null) { output.WriteRawTag(50); output.WriteMessage(GpuOptions); } if (AllowSoftPlacement != false) { output.WriteRawTag(56); output.WriteBool(AllowSoftPlacement); } if (LogDevicePlacement != false) { output.WriteRawTag(64); 
output.WriteBool(LogDevicePlacement); } if (UsePerSessionThreads != false) { output.WriteRawTag(72); output.WriteBool(UsePerSessionThreads); } if (graphOptions_ != null) { output.WriteRawTag(82); output.WriteMessage(GraphOptions); } if (OperationTimeoutInMs != 0L) { output.WriteRawTag(88); output.WriteInt64(OperationTimeoutInMs); } sessionInterOpThreadPool_.WriteTo(output, _repeated_sessionInterOpThreadPool_codec); if (rpcOptions_ != null) { output.WriteRawTag(106); output.WriteMessage(RpcOptions); } if (clusterDef_ != null) { output.WriteRawTag(114); output.WriteMessage(ClusterDef); } if (IsolateSessionState != false) { output.WriteRawTag(120); output.WriteBool(IsolateSessionState); } if (experimental_ != null) { output.WriteRawTag(130, 1); output.WriteMessage(Experimental); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; size += deviceCount_.CalculateSize(_map_deviceCount_codec); if (IntraOpParallelismThreads != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(IntraOpParallelismThreads); } if (InterOpParallelismThreads != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(InterOpParallelismThreads); } if (UsePerSessionThreads != false) { size += 1 + 1; } size += sessionInterOpThreadPool_.CalculateSize(_repeated_sessionInterOpThreadPool_codec); if (PlacementPeriod != 0) { size += 1 + pb::CodedOutputStream.ComputeInt32Size(PlacementPeriod); } size += deviceFilters_.CalculateSize(_repeated_deviceFilters_codec); if (gpuOptions_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(GpuOptions); } if (AllowSoftPlacement != false) { size += 1 + 1; } if (LogDevicePlacement != false) { size += 1 + 1; } if (graphOptions_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(GraphOptions); } if (OperationTimeoutInMs != 0L) { size += 1 + pb::CodedOutputStream.ComputeInt64Size(OperationTimeoutInMs); } if (rpcOptions_ != null) 
{ size += 1 + pb::CodedOutputStream.ComputeMessageSize(RpcOptions); } if (clusterDef_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(ClusterDef); } if (IsolateSessionState != false) { size += 1 + 1; } if (experimental_ != null) { size += 2 + pb::CodedOutputStream.ComputeMessageSize(Experimental); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(ConfigProto other) { if (other == null) { return; } deviceCount_.Add(other.deviceCount_); if (other.IntraOpParallelismThreads != 0) { IntraOpParallelismThreads = other.IntraOpParallelismThreads; } if (other.InterOpParallelismThreads != 0) { InterOpParallelismThreads = other.InterOpParallelismThreads; } if (other.UsePerSessionThreads != false) { UsePerSessionThreads = other.UsePerSessionThreads; } sessionInterOpThreadPool_.Add(other.sessionInterOpThreadPool_); if (other.PlacementPeriod != 0) { PlacementPeriod = other.PlacementPeriod; } deviceFilters_.Add(other.deviceFilters_); if (other.gpuOptions_ != null) { if (gpuOptions_ == null) { gpuOptions_ = new global::Tensorflow.GPUOptions(); } GpuOptions.MergeFrom(other.GpuOptions); } if (other.AllowSoftPlacement != false) { AllowSoftPlacement = other.AllowSoftPlacement; } if (other.LogDevicePlacement != false) { LogDevicePlacement = other.LogDevicePlacement; } if (other.graphOptions_ != null) { if (graphOptions_ == null) { graphOptions_ = new global::Tensorflow.GraphOptions(); } GraphOptions.MergeFrom(other.GraphOptions); } if (other.OperationTimeoutInMs != 0L) { OperationTimeoutInMs = other.OperationTimeoutInMs; } if (other.rpcOptions_ != null) { if (rpcOptions_ == null) { rpcOptions_ = new global::Tensorflow.RPCOptions(); } RpcOptions.MergeFrom(other.RpcOptions); } if (other.clusterDef_ != null) { if (clusterDef_ == null) { clusterDef_ = new global::Tensorflow.ClusterDef(); } ClusterDef.MergeFrom(other.ClusterDef); } if 
(other.IsolateSessionState != false) { IsolateSessionState = other.IsolateSessionState; }
      // NOTE(review): this file is protoc-generated from tensorflow/core/protobuf/config.proto
      // ("DO NOT EDIT" header) — change the .proto and regenerate rather than editing by hand.
      if (other.experimental_ != null) {
        if (experimental_ == null) {
          experimental_ = new global::Tensorflow.ConfigProto.Types.Experimental();
        }
        // proto3 merge semantics: sub-messages are merged field-by-field, not replaced.
        Experimental.MergeFrom(other.Experimental);
      }
      // Unknown fields from `other` are appended so no wire data is lost on merge.
      _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
    }

    /// <summary>
    /// Parses ConfigProto wire-format data from <paramref name="input"/>, merging into this
    /// instance. Each case label is a raw tag ((field_number &lt;&lt; 3) | wire_type); tags not
    /// listed fall through to the default branch and are kept in _unknownFields so that data
    /// written by newer schema versions round-trips (forward compatibility).
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public void MergeFrom(pb::CodedInputStream input) {
      uint tag;
      while ((tag = input.ReadTag()) != 0) {
        switch(tag) {
          default:
            _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
            break;
          case 10: {  // field 1: device_count map entries
            deviceCount_.AddEntriesFrom(input, _map_deviceCount_codec);
            break;
          }
          case 16: {  // field 2
            IntraOpParallelismThreads = input.ReadInt32();
            break;
          }
          case 24: {  // field 3
            PlacementPeriod = input.ReadInt32();
            break;
          }
          case 34: {  // field 4: repeated device_filters
            deviceFilters_.AddEntriesFrom(input, _repeated_deviceFilters_codec);
            break;
          }
          case 40: {  // field 5
            InterOpParallelismThreads = input.ReadInt32();
            break;
          }
          case 50: {  // field 6: gpu_options sub-message (lazily allocated, merged in place)
            if (gpuOptions_ == null) {
              gpuOptions_ = new global::Tensorflow.GPUOptions();
            }
            input.ReadMessage(gpuOptions_);
            break;
          }
          case 56: {  // field 7
            AllowSoftPlacement = input.ReadBool();
            break;
          }
          case 64: {  // field 8
            LogDevicePlacement = input.ReadBool();
            break;
          }
          case 72: {  // field 9
            UsePerSessionThreads = input.ReadBool();
            break;
          }
          case 82: {  // field 10: graph_options sub-message
            if (graphOptions_ == null) {
              graphOptions_ = new global::Tensorflow.GraphOptions();
            }
            input.ReadMessage(graphOptions_);
            break;
          }
          case 88: {  // field 11
            OperationTimeoutInMs = input.ReadInt64();
            break;
          }
          case 98: {  // field 12: repeated session_inter_op_thread_pool
            sessionInterOpThreadPool_.AddEntriesFrom(input, _repeated_sessionInterOpThreadPool_codec);
            break;
          }
          case 106: {  // field 13: rpc_options sub-message
            if (rpcOptions_ == null) {
              rpcOptions_ = new global::Tensorflow.RPCOptions();
            }
            input.ReadMessage(rpcOptions_);
            break;
          }
          case 114: {  // field 14: cluster_def sub-message
            if (clusterDef_ == null) {
              clusterDef_ = new global::Tensorflow.ClusterDef();
            }
            input.ReadMessage(clusterDef_);
            break;
          }
          case 120: {  // field 15
            IsolateSessionState = input.ReadBool();
            break;
          }
          case 130: {  // field 16: experimental sub-message
            if (experimental_ == null) {
              experimental_ = new global::Tensorflow.ConfigProto.Types.Experimental();
            }
            input.ReadMessage(experimental_);
            break;
          }
        }
      }
    }

    #region Nested types
    /// <summary>Container for nested types declared in the ConfigProto message type.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static partial class Types {
      /// <summary>
      /// Everything inside Experimental is subject to change and is not subject
      /// to API stability guarantees in
      /// https://www.tensorflow.org/guide/version_compat.
      /// </summary>
      public sealed partial class Experimental : pb::IMessage {
        // Factory-based parser used by the runtime's generic parsing entry points.
        private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new Experimental());
        // Holds any fields read off the wire that this schema version does not know about.
        private pb::UnknownFieldSet _unknownFields;
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public static pb::MessageParser Parser { get { return _parser; } }

        /// <summary>Reflection descriptor; index 1 of ConfigProto's nested types.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public static pbr::MessageDescriptor Descriptor {
          get { return global::Tensorflow.ConfigProto.Descriptor.NestedTypes[1]; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        pbr::MessageDescriptor pb::IMessage.Descriptor {
          get { return Descriptor; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental() {
          OnConstruction();
        }

        // Hook for hand-written partial-class extensions; runs at the end of construction.
        partial void OnConstruction();

        /// <summary>Copy constructor: field-by-field copy (all fields here are scalars/strings).</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental(Experimental other) : this() {
          collectiveGroupLeader_ = other.collectiveGroupLeader_;
          executorType_ = other.executorType_;
          recvBufMaxChunk_ = other.recvBufMaxChunk_;
          useNumaAffinity_ = other.useNumaAffinity_;
          collectiveDeterministicSequentialExecution_ = other.collectiveDeterministicSequentialExecution_;
          collectiveNccl_ = other.collectiveNccl_;
          shareSessionStateInClusterspecPropagation_ = other.shareSessionStateInClusterspecPropagation_;
          disableThreadSpinning_ = other.disableThreadSpinning_;
          shareClusterDevicesInSession_ = other.shareClusterDevicesInSession_;
          _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
        }
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental Clone() {
          return new Experimental(this);
        }

        /// <summary>Field number for the "collective_group_leader" field.</summary>
        public const int CollectiveGroupLeaderFieldNumber = 1;
        private string collectiveGroupLeader_ = "";
        /// <summary>
        /// Task name for group resolution.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public string CollectiveGroupLeader {
          get { return collectiveGroupLeader_; }
          // proto3 strings may never be null; the empty string is the default value.
          set { collectiveGroupLeader_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); }
        }

        /// <summary>Field number for the "executor_type" field.</summary>
        public const int ExecutorTypeFieldNumber = 3;
        private string executorType_ = "";
        /// <summary>
        /// Which executor to use; the default executor will be used
        /// if it is an empty string or "DEFAULT".
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public string ExecutorType {
          get { return executorType_; }
          set { executorType_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); }
        }

        /// <summary>Field number for the "recv_buf_max_chunk" field.</summary>
        public const int RecvBufMaxChunkFieldNumber = 4;
        private int recvBufMaxChunk_;
        /// <summary>
        /// Guidance to formatting of large RecvBuf fields for transfer.
        /// Any positive value sets the max chunk size. 0 defaults to 4096.
        /// Any negative value indicates no max, i.e. one chunk only.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public int RecvBufMaxChunk {
          get { return recvBufMaxChunk_; }
          set { recvBufMaxChunk_ = value; }
        }

        /// <summary>Field number for the "use_numa_affinity" field.</summary>
        public const int UseNumaAffinityFieldNumber = 5;
        private bool useNumaAffinity_;
        /// <summary>
        /// If true, and supported by the platform, the runtime will attempt to
        /// use NUMA affinity where applicable. One consequence will be the
        /// existence of as many CPU devices as there are available NUMA nodes.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool UseNumaAffinity {
          get { return useNumaAffinity_; }
          set { useNumaAffinity_ = value; }
        }

        /// <summary>Field number for the "collective_deterministic_sequential_execution" field.</summary>
        public const int CollectiveDeterministicSequentialExecutionFieldNumber = 6;
        private bool collectiveDeterministicSequentialExecution_;
        /// <summary>
        /// If true, make collective op execution order sequential and deterministic
        /// for potentially concurrent collective instances.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool CollectiveDeterministicSequentialExecution {
          get { return collectiveDeterministicSequentialExecution_; }
          set { collectiveDeterministicSequentialExecution_ = value; }
        }

        /// <summary>Field number for the "collective_nccl" field.</summary>
        public const int CollectiveNcclFieldNumber = 7;
        private bool collectiveNccl_;
        /// <summary>
        /// If true, use NCCL for CollectiveOps. This feature is highly
        /// experimental.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool CollectiveNccl {
          get { return collectiveNccl_; }
          set { collectiveNccl_ = value; }
        }

        /// <summary>Field number for the "share_session_state_in_clusterspec_propagation" field.</summary>
        public const int ShareSessionStateInClusterspecPropagationFieldNumber = 8;
        private bool shareSessionStateInClusterspecPropagation_;
        /// <summary>
        /// In the following, session state means the value of a variable, elements
        /// in a hash table, or any other resource, accessible by worker sessions
        /// held by a TF server.
        ///
        /// When ClusterSpec propagation is enabled, the value of
        /// isolate_session_state is ignored when deciding whether to share session
        /// states in a TF server (for backwards compatibility reasons).
        /// - If share_session_state_in_clusterspec_propagation is true, the session
        ///   states are shared.
        /// - If share_session_state_in_clusterspec_propagation is false, session
        ///   states are isolated.
        ///
        /// When clusterspec propagation is not used, the value of
        /// share_session_state_in_clusterspec_propagation is ignored when deciding
        /// whether to share session states in a TF server.
        /// - If isolate_session_state is true, session states are isolated.
        /// - If isolate_session_state is false, session states are shared.
        ///
        /// TODO(b/129330037): Add a single API that consistently treats
        /// isolate_session_state and ClusterSpec propagation.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool ShareSessionStateInClusterspecPropagation {
          get { return shareSessionStateInClusterspecPropagation_; }
          set { shareSessionStateInClusterspecPropagation_ = value; }
        }

        /// <summary>Field number for the "disable_thread_spinning" field.</summary>
        public const int DisableThreadSpinningFieldNumber = 9;
        private bool disableThreadSpinning_;
        /// <summary>
        /// If using a direct session, disable spinning while waiting for work in
        /// the thread pool. This may result in higher latency for completing ops,
        /// but in the case where there is a lot of spinning may result in lower
        /// CPU usage.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool DisableThreadSpinning {
          get { return disableThreadSpinning_; }
          set { disableThreadSpinning_ = value; }
        }

        /// <summary>Field number for the "share_cluster_devices_in_session" field.</summary>
        public const int ShareClusterDevicesInSessionFieldNumber = 10;
        private bool shareClusterDevicesInSession_;
        /// <summary>
        /// When true, WorkerSessions are created with device attributes from the
        /// full cluster.
        /// This is helpful when a worker wants to partition a graph
        /// (for example during a PartitionedCallOp).
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool ShareClusterDevicesInSession {
          get { return shareClusterDevicesInSession_; }
          set { shareClusterDevicesInSession_ = value; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override bool Equals(object other) {
          return Equals(other as Experimental);
        }

        /// <summary>Value equality over every declared field plus the unknown-field set.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool Equals(Experimental other) {
          if (ReferenceEquals(other, null)) {
            return false;
          }
          if (ReferenceEquals(other, this)) {
            return true;
          }
          if (CollectiveGroupLeader != other.CollectiveGroupLeader) return false;
          if (ExecutorType != other.ExecutorType) return false;
          if (RecvBufMaxChunk != other.RecvBufMaxChunk) return false;
          if (UseNumaAffinity != other.UseNumaAffinity) return false;
          if (CollectiveDeterministicSequentialExecution != other.CollectiveDeterministicSequentialExecution) return false;
          if (CollectiveNccl != other.CollectiveNccl) return false;
          if (ShareSessionStateInClusterspecPropagation != other.ShareSessionStateInClusterspecPropagation) return false;
          if (DisableThreadSpinning != other.DisableThreadSpinning) return false;
          if (ShareClusterDevicesInSession != other.ShareClusterDevicesInSession) return false;
          return Equals(_unknownFields, other._unknownFields);
        }

        // Hash combines only the fields that differ from their proto3 defaults, matching Equals.
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override int GetHashCode() {
          int hash = 1;
          if (CollectiveGroupLeader.Length != 0) hash ^= CollectiveGroupLeader.GetHashCode();
          if (ExecutorType.Length != 0) hash ^= ExecutorType.GetHashCode();
          if (RecvBufMaxChunk != 0) hash ^= RecvBufMaxChunk.GetHashCode();
          if (UseNumaAffinity != false) hash ^= UseNumaAffinity.GetHashCode();
          if (CollectiveDeterministicSequentialExecution != false) hash ^= CollectiveDeterministicSequentialExecution.GetHashCode();
          if (CollectiveNccl != false) hash ^= CollectiveNccl.GetHashCode();
          if (ShareSessionStateInClusterspecPropagation != false) hash ^= ShareSessionStateInClusterspecPropagation.GetHashCode();
          if
        (DisableThreadSpinning != false) hash ^= DisableThreadSpinning.GetHashCode();
          if (ShareClusterDevicesInSession != false) hash ^= ShareClusterDevicesInSession.GetHashCode();
          if (_unknownFields != null) {
            hash ^= _unknownFields.GetHashCode();
          }
          return hash;
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override string ToString() {
          return pb::JsonFormatter.ToDiagnosticString(this);
        }

        /// <summary>
        /// Serializes to the wire format. Fields equal to their proto3 default are skipped
        /// entirely (standard proto3 presence rules); raw tags are precomputed constants.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void WriteTo(pb::CodedOutputStream output) {
          if (CollectiveGroupLeader.Length != 0) {
            output.WriteRawTag(10);
            output.WriteString(CollectiveGroupLeader);
          }
          if (ExecutorType.Length != 0) {
            output.WriteRawTag(26);
            output.WriteString(ExecutorType);
          }
          if (RecvBufMaxChunk != 0) {
            output.WriteRawTag(32);
            output.WriteInt32(RecvBufMaxChunk);
          }
          if (UseNumaAffinity != false) {
            output.WriteRawTag(40);
            output.WriteBool(UseNumaAffinity);
          }
          if (CollectiveDeterministicSequentialExecution != false) {
            output.WriteRawTag(48);
            output.WriteBool(CollectiveDeterministicSequentialExecution);
          }
          if (CollectiveNccl != false) {
            output.WriteRawTag(56);
            output.WriteBool(CollectiveNccl);
          }
          if (ShareSessionStateInClusterspecPropagation != false) {
            output.WriteRawTag(64);
            output.WriteBool(ShareSessionStateInClusterspecPropagation);
          }
          if (DisableThreadSpinning != false) {
            output.WriteRawTag(72);
            output.WriteBool(DisableThreadSpinning);
          }
          if (ShareClusterDevicesInSession != false) {
            output.WriteRawTag(80);
            output.WriteBool(ShareClusterDevicesInSession);
          }
          if (_unknownFields != null) {
            _unknownFields.WriteTo(output);
          }
        }

        /// <summary>
        /// Exact byte size of WriteTo's output; "1 + 1" is one tag byte plus one payload
        /// byte for bool fields whose tags fit in a single byte.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public int CalculateSize() {
          int size = 0;
          if (CollectiveGroupLeader.Length != 0) {
            size += 1 + pb::CodedOutputStream.ComputeStringSize(CollectiveGroupLeader);
          }
          if (ExecutorType.Length != 0) {
            size += 1 + pb::CodedOutputStream.ComputeStringSize(ExecutorType);
          }
          if (RecvBufMaxChunk != 0) {
            size += 1 + pb::CodedOutputStream.ComputeInt32Size(RecvBufMaxChunk);
          }
          if
        (UseNumaAffinity != false) {
            size += 1 + 1;
          }
          if (CollectiveDeterministicSequentialExecution != false) {
            size += 1 + 1;
          }
          if (CollectiveNccl != false) {
            size += 1 + 1;
          }
          if (ShareSessionStateInClusterspecPropagation != false) {
            size += 1 + 1;
          }
          if (DisableThreadSpinning != false) {
            size += 1 + 1;
          }
          if (ShareClusterDevicesInSession != false) {
            size += 1 + 1;
          }
          if (_unknownFields != null) {
            size += _unknownFields.CalculateSize();
          }
          return size;
        }

        /// <summary>Merges `other` into this message; only non-default fields overwrite.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void MergeFrom(Experimental other) {
          if (other == null) {
            return;
          }
          if (other.CollectiveGroupLeader.Length != 0) {
            CollectiveGroupLeader = other.CollectiveGroupLeader;
          }
          if (other.ExecutorType.Length != 0) {
            ExecutorType = other.ExecutorType;
          }
          if (other.RecvBufMaxChunk != 0) {
            RecvBufMaxChunk = other.RecvBufMaxChunk;
          }
          if (other.UseNumaAffinity != false) {
            UseNumaAffinity = other.UseNumaAffinity;
          }
          if (other.CollectiveDeterministicSequentialExecution != false) {
            CollectiveDeterministicSequentialExecution = other.CollectiveDeterministicSequentialExecution;
          }
          if (other.CollectiveNccl != false) {
            CollectiveNccl = other.CollectiveNccl;
          }
          if (other.ShareSessionStateInClusterspecPropagation != false) {
            ShareSessionStateInClusterspecPropagation = other.ShareSessionStateInClusterspecPropagation;
          }
          if (other.DisableThreadSpinning != false) {
            DisableThreadSpinning = other.DisableThreadSpinning;
          }
          if (other.ShareClusterDevicesInSession != false) {
            ShareClusterDevicesInSession = other.ShareClusterDevicesInSession;
          }
          _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
        }

        /// <summary>Wire-format parser; unrecognized tags are kept in _unknownFields.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void MergeFrom(pb::CodedInputStream input) {
          uint tag;
          while ((tag = input.ReadTag()) != 0) {
            switch(tag) {
              default:
                _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
                break;
              case 10: {  // field 1: collective_group_leader
                CollectiveGroupLeader = input.ReadString();
                break;
              }
              case 26: {  // field 3: executor_type
                ExecutorType = input.ReadString();
break;
              }
              case 32: {  // field 4: recv_buf_max_chunk
                RecvBufMaxChunk = input.ReadInt32();
                break;
              }
              case 40: {  // field 5: use_numa_affinity
                UseNumaAffinity = input.ReadBool();
                break;
              }
              case 48: {  // field 6
                CollectiveDeterministicSequentialExecution = input.ReadBool();
                break;
              }
              case 56: {  // field 7: collective_nccl
                CollectiveNccl = input.ReadBool();
                break;
              }
              case 64: {  // field 8
                ShareSessionStateInClusterspecPropagation = input.ReadBool();
                break;
              }
              case 72: {  // field 9: disable_thread_spinning
                DisableThreadSpinning = input.ReadBool();
                break;
              }
              case 80: {  // field 10: share_cluster_devices_in_session
                ShareClusterDevicesInSession = input.ReadBool();
                break;
              }
            }
          }
        }

      }  // class Experimental
    }  // class Types
    #endregion

  }  // class ConfigProto

  /// <summary>
  /// Options for a single Run() call.
  /// </summary>
  public sealed partial class RunOptions : pb::IMessage {
    // Factory-based parser used by the runtime's generic parsing entry points.
    private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new RunOptions());
    // Holds any fields read off the wire that this schema version does not know about.
    private pb::UnknownFieldSet _unknownFields;
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static pb::MessageParser Parser { get { return _parser; } }

    /// <summary>Reflection descriptor; message index 6 within config.proto.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static pbr::MessageDescriptor Descriptor {
      get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[6]; }
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    pbr::MessageDescriptor pb::IMessage.Descriptor {
      get { return Descriptor; }
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public RunOptions() {
      OnConstruction();
    }

    // Hook for hand-written partial-class extensions; runs at the end of construction.
    partial void OnConstruction();

    /// <summary>Copy constructor: scalars copied, sub-messages deep-cloned.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public RunOptions(RunOptions other) : this() {
      traceLevel_ = other.traceLevel_;
      timeoutInMs_ = other.timeoutInMs_;
      interOpThreadPool_ = other.interOpThreadPool_;
      outputPartitionGraphs_ = other.outputPartitionGraphs_;
      debugOptions_ = other.debugOptions_ != null ? other.debugOptions_.Clone() : null;
      reportTensorAllocationsUponOom_ = other.reportTensorAllocationsUponOom_;
      experimental_ = other.experimental_ != null ? other.experimental_.Clone() : null;
      _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public RunOptions Clone() {
      return new RunOptions(this);
    }

    /// <summary>Field number for the "trace_level" field.</summary>
    public const int TraceLevelFieldNumber = 1;
    private global::Tensorflow.RunOptions.Types.TraceLevel traceLevel_ = 0;
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public global::Tensorflow.RunOptions.Types.TraceLevel TraceLevel {
      get { return traceLevel_; }
      set { traceLevel_ = value; }
    }

    /// <summary>Field number for the "timeout_in_ms" field.</summary>
    public const int TimeoutInMsFieldNumber = 2;
    private long timeoutInMs_;
    /// <summary>
    /// Time to wait for operation to complete in milliseconds.
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public long TimeoutInMs {
      get { return timeoutInMs_; }
      set { timeoutInMs_ = value; }
    }

    /// <summary>Field number for the "inter_op_thread_pool" field.</summary>
    public const int InterOpThreadPoolFieldNumber = 3;
    private int interOpThreadPool_;
    /// <summary>
    /// The thread pool to use, if session_inter_op_thread_pool is configured.
    /// To use the caller thread set this to -1 - this uses the caller thread
    /// to execute Session::Run() and thus avoids a context switch. Using the
    /// caller thread to execute Session::Run() should be done ONLY for simple
    /// graphs, where the overhead of an additional context switch is
    /// comparable with the overhead of Session::Run().
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public int InterOpThreadPool {
      get { return interOpThreadPool_; }
      set { interOpThreadPool_ = value; }
    }

    /// <summary>Field number for the "output_partition_graphs" field.</summary>
    public const int OutputPartitionGraphsFieldNumber = 5;
    private bool outputPartitionGraphs_;
    /// <summary>
    /// Whether the partition graph(s) executed by the executor(s) should be
    /// outputted via RunMetadata.
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public bool OutputPartitionGraphs {
      get { return outputPartitionGraphs_; }
      set { outputPartitionGraphs_ = value; }
    }

    /// <summary>Field number for the "debug_options" field.</summary>
    public const int DebugOptionsFieldNumber = 6;
    private global::Tensorflow.DebugOptions debugOptions_;
    /// <summary>
    /// EXPERIMENTAL. Options used to initialize DebuggerState, if enabled.
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public global::Tensorflow.DebugOptions DebugOptions {
      get { return debugOptions_; }
      set { debugOptions_ = value; }
    }

    /// <summary>Field number for the "report_tensor_allocations_upon_oom" field.</summary>
    public const int ReportTensorAllocationsUponOomFieldNumber = 7;
    private bool reportTensorAllocationsUponOom_;
    /// <summary>
    /// When enabled, causes tensor allocation information to be included in
    /// the error message when the Run() call fails because the allocator ran
    /// out of memory (OOM).
    ///
    /// Enabling this option can slow down the Run() call.
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public bool ReportTensorAllocationsUponOom {
      get { return reportTensorAllocationsUponOom_; }
      set { reportTensorAllocationsUponOom_ = value; }
    }

    /// <summary>Field number for the "experimental" field.</summary>
    public const int ExperimentalFieldNumber = 8;
    private global::Tensorflow.RunOptions.Types.Experimental experimental_;
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public global::Tensorflow.RunOptions.Types.Experimental Experimental {
      get { return experimental_; }
      set { experimental_ = value; }
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public override bool Equals(object other) {
      return Equals(other as RunOptions);
    }

    /// <summary>Value equality over every declared field plus the unknown-field set.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public bool Equals(RunOptions other) {
      if (ReferenceEquals(other, null)) {
        return false;
      }
      if (ReferenceEquals(other, this)) {
        return true;
      }
      if (TraceLevel != other.TraceLevel) return false;
      if (TimeoutInMs != other.TimeoutInMs) return false;
      if (InterOpThreadPool != other.InterOpThreadPool) return false;
      if (OutputPartitionGraphs != other.OutputPartitionGraphs) return false;
      if (!object.Equals(DebugOptions, other.DebugOptions)) return false;
      if (ReportTensorAllocationsUponOom != other.ReportTensorAllocationsUponOom) return false;
      if (!object.Equals(Experimental, other.Experimental)) return false;
      return Equals(_unknownFields, other._unknownFields);
    }

    // Hash combines only the fields that differ from their proto3 defaults, matching Equals.
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public override int GetHashCode() {
      int hash = 1;
      if (TraceLevel != 0) hash ^= TraceLevel.GetHashCode();
      if (TimeoutInMs != 0L) hash ^= TimeoutInMs.GetHashCode();
      if (InterOpThreadPool != 0) hash ^= InterOpThreadPool.GetHashCode();
      if (OutputPartitionGraphs != false) hash ^= OutputPartitionGraphs.GetHashCode();
      if (debugOptions_ != null) hash ^= DebugOptions.GetHashCode();
      if (ReportTensorAllocationsUponOom != false) hash ^= ReportTensorAllocationsUponOom.GetHashCode();
      if (experimental_ != null) hash ^= Experimental.GetHashCode();
      if (_unknownFields != null) {
        hash ^= _unknownFields.GetHashCode();
      }
      return hash;
    }

    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public override string ToString() {
      return pb::JsonFormatter.ToDiagnosticString(this);
    }

    /// <summary>
    /// Serializes to the wire format; fields equal to their proto3 default are skipped.
    /// </summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public void WriteTo(pb::CodedOutputStream output) {
      if (TraceLevel != 0) {
        output.WriteRawTag(8);
        output.WriteEnum((int) TraceLevel);
      }
      if (TimeoutInMs != 0L) {
        output.WriteRawTag(16);
        output.WriteInt64(TimeoutInMs);
      }
      if (InterOpThreadPool != 0) {
        output.WriteRawTag(24);
        output.WriteInt32(InterOpThreadPool);
      }
      if (OutputPartitionGraphs != false) {
        output.WriteRawTag(40);
        output.WriteBool(OutputPartitionGraphs);
      }
      if (debugOptions_ != null) {
        output.WriteRawTag(50);
        output.WriteMessage(DebugOptions);
      }
      if (ReportTensorAllocationsUponOom != false) {
        output.WriteRawTag(56);
        output.WriteBool(ReportTensorAllocationsUponOom);
      }
      if (experimental_ != null) {
        output.WriteRawTag(66);
        output.WriteMessage(Experimental);
      }
      if (_unknownFields != null) {
        _unknownFields.WriteTo(output);
      }
    }

    /// <summary>Exact byte size of WriteTo's output for this message.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public int CalculateSize() {
      int size = 0;
      if (TraceLevel != 0) {
        size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) TraceLevel);
      }
      if (TimeoutInMs != 0L) {
        size += 1 + pb::CodedOutputStream.ComputeInt64Size(TimeoutInMs);
      }
      if (InterOpThreadPool != 0) {
        size += 1 + pb::CodedOutputStream.ComputeInt32Size(InterOpThreadPool);
      }
      if (OutputPartitionGraphs != false) {
        size += 1 + 1;
      }
      if (debugOptions_ != null) {
        size += 1 + pb::CodedOutputStream.ComputeMessageSize(DebugOptions);
      }
      if (ReportTensorAllocationsUponOom != false) {
        size += 1 + 1;
      }
      if (experimental_ != null) {
        size += 1 + pb::CodedOutputStream.ComputeMessageSize(Experimental);
      }
      if (_unknownFields != null) {
        size += _unknownFields.CalculateSize();
      }
      return size;
    }

    /// <summary>Merges `other` into this message; only non-default fields overwrite.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public void MergeFrom(RunOptions other) {
      if (other == null) {
        return;
      }
      if (other.TraceLevel != 0) {
        TraceLevel = other.TraceLevel;
      }
      if (other.TimeoutInMs != 0L) {
        TimeoutInMs = other.TimeoutInMs;
      }
      if (other.InterOpThreadPool != 0) {
        InterOpThreadPool = other.InterOpThreadPool;
      }
      if
    (other.OutputPartitionGraphs != false) {
        OutputPartitionGraphs = other.OutputPartitionGraphs;
      }
      if (other.debugOptions_ != null) {
        if (debugOptions_ == null) {
          debugOptions_ = new global::Tensorflow.DebugOptions();
        }
        // Sub-messages are merged recursively, not replaced wholesale.
        DebugOptions.MergeFrom(other.DebugOptions);
      }
      if (other.ReportTensorAllocationsUponOom != false) {
        ReportTensorAllocationsUponOom = other.ReportTensorAllocationsUponOom;
      }
      if (other.experimental_ != null) {
        if (experimental_ == null) {
          experimental_ = new global::Tensorflow.RunOptions.Types.Experimental();
        }
        Experimental.MergeFrom(other.Experimental);
      }
      _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
    }

    /// <summary>Wire-format parser; unrecognized tags are kept in _unknownFields.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public void MergeFrom(pb::CodedInputStream input) {
      uint tag;
      while ((tag = input.ReadTag()) != 0) {
        switch(tag) {
          default:
            _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
            break;
          case 8: {  // field 1: trace_level (enum)
            traceLevel_ = (global::Tensorflow.RunOptions.Types.TraceLevel) input.ReadEnum();
            break;
          }
          case 16: {  // field 2: timeout_in_ms
            TimeoutInMs = input.ReadInt64();
            break;
          }
          case 24: {  // field 3: inter_op_thread_pool
            InterOpThreadPool = input.ReadInt32();
            break;
          }
          case 40: {  // field 5: output_partition_graphs
            OutputPartitionGraphs = input.ReadBool();
            break;
          }
          case 50: {  // field 6: debug_options sub-message
            if (debugOptions_ == null) {
              debugOptions_ = new global::Tensorflow.DebugOptions();
            }
            input.ReadMessage(debugOptions_);
            break;
          }
          case 56: {  // field 7: report_tensor_allocations_upon_oom
            ReportTensorAllocationsUponOom = input.ReadBool();
            break;
          }
          case 66: {  // field 8: experimental sub-message
            if (experimental_ == null) {
              experimental_ = new global::Tensorflow.RunOptions.Types.Experimental();
            }
            input.ReadMessage(experimental_);
            break;
          }
        }
      }
    }

    #region Nested types
    /// <summary>Container for nested types declared in the RunOptions message type.</summary>
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
    public static partial class Types {
      /// <summary>
      /// TODO(pbar) Turn this into a TraceOptions proto which allows
      /// tracing to be controlled in a more orthogonal manner?
      /// </summary>
      public enum TraceLevel {
        [pbr::OriginalName("NO_TRACE")] NoTrace = 0,
        [pbr::OriginalName("SOFTWARE_TRACE")] SoftwareTrace = 1,
        [pbr::OriginalName("HARDWARE_TRACE")] HardwareTrace = 2,
        [pbr::OriginalName("FULL_TRACE")] FullTrace = 3,
      }

      /// <summary>
      /// Everything inside Experimental is subject to change and is not subject
      /// to API stability guarantees in
      /// https://www.tensorflow.org/guide/version_compat.
      /// </summary>
      public sealed partial class Experimental : pb::IMessage {
        private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new Experimental());
        private pb::UnknownFieldSet _unknownFields;
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public static pb::MessageParser Parser { get { return _parser; } }

        /// <summary>Reflection descriptor; index 0 of RunOptions' nested types.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public static pbr::MessageDescriptor Descriptor {
          get { return global::Tensorflow.RunOptions.Descriptor.NestedTypes[0]; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        pbr::MessageDescriptor pb::IMessage.Descriptor {
          get { return Descriptor; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental() {
          OnConstruction();
        }

        partial void OnConstruction();

        /// <summary>Copy constructor: both fields are scalars, copied directly.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental(Experimental other) : this() {
          collectiveGraphKey_ = other.collectiveGraphKey_;
          useRunHandlerPool_ = other.useRunHandlerPool_;
          _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public Experimental Clone() {
          return new Experimental(this);
        }

        /// <summary>Field number for the "collective_graph_key" field.</summary>
        public const int CollectiveGraphKeyFieldNumber = 1;
        private long collectiveGraphKey_;
        /// <summary>
        /// If non-zero, declares that this graph is going to use collective
        /// ops and must synchronize step_ids with any other graph with this
        /// same group_key value (in a distributed computation where tasks
        /// run disjoint graphs).
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public long CollectiveGraphKey {
          get { return collectiveGraphKey_; }
          set { collectiveGraphKey_ = value; }
        }

        /// <summary>Field number for the "use_run_handler_pool" field.</summary>
        public const int UseRunHandlerPoolFieldNumber = 2;
        private bool useRunHandlerPool_;
        /// <summary>
        /// If true, then operations (using the inter-op pool) across all
        /// session::run() calls will be centrally scheduled, optimizing for (median
        /// and tail) latency.
        /// Consider using this option for CPU-bound workloads like inference.
        /// </summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool UseRunHandlerPool {
          get { return useRunHandlerPool_; }
          set { useRunHandlerPool_ = value; }
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override bool Equals(object other) {
          return Equals(other as Experimental);
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public bool Equals(Experimental other) {
          if (ReferenceEquals(other, null)) {
            return false;
          }
          if (ReferenceEquals(other, this)) {
            return true;
          }
          if (CollectiveGraphKey != other.CollectiveGraphKey) return false;
          if (UseRunHandlerPool != other.UseRunHandlerPool) return false;
          return Equals(_unknownFields, other._unknownFields);
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override int GetHashCode() {
          int hash = 1;
          if (CollectiveGraphKey != 0L) hash ^= CollectiveGraphKey.GetHashCode();
          if (UseRunHandlerPool != false) hash ^= UseRunHandlerPool.GetHashCode();
          if (_unknownFields != null) {
            hash ^= _unknownFields.GetHashCode();
          }
          return hash;
        }

        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public override string ToString() {
          return pb::JsonFormatter.ToDiagnosticString(this);
        }

        /// <summary>Serializes to the wire format; default-valued fields are skipped.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void WriteTo(pb::CodedOutputStream output) {
          if (CollectiveGraphKey != 0L) {
            output.WriteRawTag(8);
            output.WriteInt64(CollectiveGraphKey);
          }
          if (UseRunHandlerPool != false) {
            output.WriteRawTag(16);
            output.WriteBool(UseRunHandlerPool);
          }
          if (_unknownFields != null) {
            _unknownFields.WriteTo(output);
          }
        }

        /// <summary>Exact byte size of WriteTo's output for this message.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public int CalculateSize() {
          int size = 0;
          if (CollectiveGraphKey != 0L) {
            size += 1 + pb::CodedOutputStream.ComputeInt64Size(CollectiveGraphKey);
          }
          if (UseRunHandlerPool != false) {
            size += 1 + 1;
          }
          if (_unknownFields != null) {
            size += _unknownFields.CalculateSize();
          }
          return size;
        }

        /// <summary>Merges `other` into this message; only non-default fields overwrite.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void MergeFrom(Experimental other) {
          if (other == null) {
            return;
          }
          if (other.CollectiveGraphKey != 0L) {
            CollectiveGraphKey = other.CollectiveGraphKey;
          }
          if (other.UseRunHandlerPool != false) {
            UseRunHandlerPool = other.UseRunHandlerPool;
          }
          _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
        }

        /// <summary>Wire-format parser; unrecognized tags are kept in _unknownFields.</summary>
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
        public void MergeFrom(pb::CodedInputStream input) {
          uint tag;
          while ((tag = input.ReadTag()) != 0) {
            switch(tag) {
              default:
                _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
                break;
              case 8: {  // field 1: collective_graph_key
                CollectiveGraphKey = input.ReadInt64();
                break;
              }
              case 16: {  // field 2: use_run_handler_pool
                UseRunHandlerPool = input.ReadBool();
                break;
              }
            }
          }
        }

      }  // class Experimental
    }  // class Types
    #endregion

  }  // class RunOptions

  ///
  /// Metadata output (i.e., non-Tensor) for a single Run() call.
/// public sealed partial class RunMetadata : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new RunMetadata()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[7]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public RunMetadata() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public RunMetadata(RunMetadata other) : this() { stepStats_ = other.stepStats_ != null ? other.stepStats_.Clone() : null; costGraph_ = other.costGraph_ != null ? other.costGraph_.Clone() : null; partitionGraphs_ = other.partitionGraphs_.Clone(); functionGraphs_ = other.functionGraphs_.Clone(); _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public RunMetadata Clone() { return new RunMetadata(this); } /// Field number for the "step_stats" field. public const int StepStatsFieldNumber = 1; private global::Tensorflow.StepStats stepStats_; /// /// Statistics traced for this step. Populated if tracing is turned on via the /// "RunOptions" proto. /// EXPERIMENTAL: The format and set of events may change in future versions. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.StepStats StepStats { get { return stepStats_; } set { stepStats_ = value; } } /// Field number for the "cost_graph" field. 
public const int CostGraphFieldNumber = 2; private global::Tensorflow.CostGraphDef costGraph_; /// /// The cost graph for the computation defined by the run call. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.CostGraphDef CostGraph { get { return costGraph_; } set { costGraph_ = value; } } /// Field number for the "partition_graphs" field. public const int PartitionGraphsFieldNumber = 3; private static readonly pb::FieldCodec _repeated_partitionGraphs_codec = pb::FieldCodec.ForMessage(26, global::Tensorflow.GraphDef.Parser); private readonly pbc::RepeatedField partitionGraphs_ = new pbc::RepeatedField(); /// /// Graphs of the partitions executed by executors. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField PartitionGraphs { get { return partitionGraphs_; } } /// Field number for the "function_graphs" field. public const int FunctionGraphsFieldNumber = 4; private static readonly pb::FieldCodec _repeated_functionGraphs_codec = pb::FieldCodec.ForMessage(34, global::Tensorflow.RunMetadata.Types.FunctionGraphs.Parser); private readonly pbc::RepeatedField functionGraphs_ = new pbc::RepeatedField(); /// /// This is only populated for graphs that are run as functions in TensorFlow /// V2. There will be an entry below for each function that is traced. /// The main use cases of the post_optimization_graph and the partition_graphs /// is to give the caller insight into the graphs that were actually run by the /// runtime. Additional information (such as those in step_stats) will match /// these graphs. /// We also include the pre_optimization_graph since it is usually easier to /// read, and is helpful in situations where the caller wants to get a high /// level idea of what the built graph looks like (since the various graph /// optimization passes might change the structure of the graph significantly). 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField FunctionGraphs { get { return functionGraphs_; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as RunMetadata); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(RunMetadata other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (!object.Equals(StepStats, other.StepStats)) return false; if (!object.Equals(CostGraph, other.CostGraph)) return false; if(!partitionGraphs_.Equals(other.partitionGraphs_)) return false; if(!functionGraphs_.Equals(other.functionGraphs_)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; if (stepStats_ != null) hash ^= StepStats.GetHashCode(); if (costGraph_ != null) hash ^= CostGraph.GetHashCode(); hash ^= partitionGraphs_.GetHashCode(); hash ^= functionGraphs_.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { if (stepStats_ != null) { output.WriteRawTag(10); output.WriteMessage(StepStats); } if (costGraph_ != null) { output.WriteRawTag(18); output.WriteMessage(CostGraph); } partitionGraphs_.WriteTo(output, _repeated_partitionGraphs_codec); functionGraphs_.WriteTo(output, _repeated_functionGraphs_codec); if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; if (stepStats_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(StepStats); } if 
(costGraph_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(CostGraph); } size += partitionGraphs_.CalculateSize(_repeated_partitionGraphs_codec); size += functionGraphs_.CalculateSize(_repeated_functionGraphs_codec); if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(RunMetadata other) { if (other == null) { return; } if (other.stepStats_ != null) { if (stepStats_ == null) { stepStats_ = new global::Tensorflow.StepStats(); } StepStats.MergeFrom(other.StepStats); } if (other.costGraph_ != null) { if (costGraph_ == null) { costGraph_ = new global::Tensorflow.CostGraphDef(); } CostGraph.MergeFrom(other.CostGraph); } partitionGraphs_.Add(other.partitionGraphs_); functionGraphs_.Add(other.functionGraphs_); _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: { if (stepStats_ == null) { stepStats_ = new global::Tensorflow.StepStats(); } input.ReadMessage(stepStats_); break; } case 18: { if (costGraph_ == null) { costGraph_ = new global::Tensorflow.CostGraphDef(); } input.ReadMessage(costGraph_); break; } case 26: { partitionGraphs_.AddEntriesFrom(input, _repeated_partitionGraphs_codec); break; } case 34: { functionGraphs_.AddEntriesFrom(input, _repeated_functionGraphs_codec); break; } } } } #region Nested types /// Container for nested types declared in the RunMetadata message type. 
[global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static partial class Types { public sealed partial class FunctionGraphs : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new FunctionGraphs()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.RunMetadata.Descriptor.NestedTypes[0]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public FunctionGraphs() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public FunctionGraphs(FunctionGraphs other) : this() { partitionGraphs_ = other.partitionGraphs_.Clone(); preOptimizationGraph_ = other.preOptimizationGraph_ != null ? other.preOptimizationGraph_.Clone() : null; postOptimizationGraph_ = other.postOptimizationGraph_ != null ? other.postOptimizationGraph_.Clone() : null; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public FunctionGraphs Clone() { return new FunctionGraphs(this); } /// Field number for the "partition_graphs" field. public const int PartitionGraphsFieldNumber = 1; private static readonly pb::FieldCodec _repeated_partitionGraphs_codec = pb::FieldCodec.ForMessage(10, global::Tensorflow.GraphDef.Parser); private readonly pbc::RepeatedField partitionGraphs_ = new pbc::RepeatedField(); /// /// TODO(nareshmodi): Include some sort of function/cache-key identifier? 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField PartitionGraphs { get { return partitionGraphs_; } } /// Field number for the "pre_optimization_graph" field. public const int PreOptimizationGraphFieldNumber = 2; private global::Tensorflow.GraphDef preOptimizationGraph_; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.GraphDef PreOptimizationGraph { get { return preOptimizationGraph_; } set { preOptimizationGraph_ = value; } } /// Field number for the "post_optimization_graph" field. public const int PostOptimizationGraphFieldNumber = 3; private global::Tensorflow.GraphDef postOptimizationGraph_; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.GraphDef PostOptimizationGraph { get { return postOptimizationGraph_; } set { postOptimizationGraph_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as FunctionGraphs); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(FunctionGraphs other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if(!partitionGraphs_.Equals(other.partitionGraphs_)) return false; if (!object.Equals(PreOptimizationGraph, other.PreOptimizationGraph)) return false; if (!object.Equals(PostOptimizationGraph, other.PostOptimizationGraph)) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; hash ^= partitionGraphs_.GetHashCode(); if (preOptimizationGraph_ != null) hash ^= PreOptimizationGraph.GetHashCode(); if (postOptimizationGraph_ != null) hash ^= PostOptimizationGraph.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string 
ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { partitionGraphs_.WriteTo(output, _repeated_partitionGraphs_codec); if (preOptimizationGraph_ != null) { output.WriteRawTag(18); output.WriteMessage(PreOptimizationGraph); } if (postOptimizationGraph_ != null) { output.WriteRawTag(26); output.WriteMessage(PostOptimizationGraph); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; size += partitionGraphs_.CalculateSize(_repeated_partitionGraphs_codec); if (preOptimizationGraph_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(PreOptimizationGraph); } if (postOptimizationGraph_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(PostOptimizationGraph); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(FunctionGraphs other) { if (other == null) { return; } partitionGraphs_.Add(other.partitionGraphs_); if (other.preOptimizationGraph_ != null) { if (preOptimizationGraph_ == null) { preOptimizationGraph_ = new global::Tensorflow.GraphDef(); } PreOptimizationGraph.MergeFrom(other.PreOptimizationGraph); } if (other.postOptimizationGraph_ != null) { if (postOptimizationGraph_ == null) { postOptimizationGraph_ = new global::Tensorflow.GraphDef(); } PostOptimizationGraph.MergeFrom(other.PostOptimizationGraph); } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: { 
partitionGraphs_.AddEntriesFrom(input, _repeated_partitionGraphs_codec); break; } case 18: { if (preOptimizationGraph_ == null) { preOptimizationGraph_ = new global::Tensorflow.GraphDef(); } input.ReadMessage(preOptimizationGraph_); break; } case 26: { if (postOptimizationGraph_ == null) { postOptimizationGraph_ = new global::Tensorflow.GraphDef(); } input.ReadMessage(postOptimizationGraph_); break; } } } } } } #endregion } /// /// Defines a connection between two tensors in a `GraphDef`. /// public sealed partial class TensorConnection : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new TensorConnection()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[8]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public TensorConnection() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public TensorConnection(TensorConnection other) : this() { fromTensor_ = other.fromTensor_; toTensor_ = other.toTensor_; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public TensorConnection Clone() { return new TensorConnection(this); } /// Field number for the "from_tensor" field. public const int FromTensorFieldNumber = 1; private string fromTensor_ = ""; /// /// A tensor name. The value of this tensor will be substituted for /// the tensor named in `to_tensor`. 
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public string FromTensor { get { return fromTensor_; } set { fromTensor_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); } } /// Field number for the "to_tensor" field. public const int ToTensorFieldNumber = 2; private string toTensor_ = ""; /// /// A tensor name. The value of this tensor will be bound to the /// value of the tensor named in `from_tensor`. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public string ToTensor { get { return toTensor_; } set { toTensor_ = pb::ProtoPreconditions.CheckNotNull(value, "value"); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as TensorConnection); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(TensorConnection other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if (FromTensor != other.FromTensor) return false; if (ToTensor != other.ToTensor) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; if (FromTensor.Length != 0) hash ^= FromTensor.GetHashCode(); if (ToTensor.Length != 0) hash ^= ToTensor.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { if (FromTensor.Length != 0) { output.WriteRawTag(10); output.WriteString(FromTensor); } if (ToTensor.Length != 0) { output.WriteRawTag(18); output.WriteString(ToTensor); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int 
CalculateSize() { int size = 0; if (FromTensor.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(FromTensor); } if (ToTensor.Length != 0) { size += 1 + pb::CodedOutputStream.ComputeStringSize(ToTensor); } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(TensorConnection other) { if (other == null) { return; } if (other.FromTensor.Length != 0) { FromTensor = other.FromTensor; } if (other.ToTensor.Length != 0) { ToTensor = other.ToTensor; } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: { FromTensor = input.ReadString(); break; } case 18: { ToTensor = input.ReadString(); break; } } } } } /// /// Defines a subgraph in another `GraphDef` as a set of feed points and nodes /// to be fetched or executed. /// /// Compare with the arguments to `Session::Run()`. 
// NOTE(review): protoc-generated Google.Protobuf code (file header: "DO NOT EDIT").
// Lines are whitespace-mangled (generated `///` doc comments joined onto code lines,
// so this chunk does not compile as displayed). Do not hand-edit — regenerate from
// tensorflow/core/protobuf/config.proto. Comments below only index the content.
//
// CallableOptions message: parser, descriptor (ConfigReflection MessageTypes[9]),
// constructors/Clone over all eight fields, and repeated string feed
// (field 1, string codec tag 10).
/// public sealed partial class CallableOptions : pb::IMessage { private static readonly pb::MessageParser _parser = new pb::MessageParser(() => new CallableOptions()); private pb::UnknownFieldSet _unknownFields; [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pb::MessageParser Parser { get { return _parser; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public static pbr::MessageDescriptor Descriptor { get { return global::Tensorflow.ConfigReflection.Descriptor.MessageTypes[9]; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] pbr::MessageDescriptor pb::IMessage.Descriptor { get { return Descriptor; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public CallableOptions() { OnConstruction(); } partial void OnConstruction(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public CallableOptions(CallableOptions other) : this() { feed_ = other.feed_.Clone(); fetch_ = other.fetch_.Clone(); target_ = other.target_.Clone(); runOptions_ = other.runOptions_ != null ? other.runOptions_.Clone() : null; tensorConnection_ = other.tensorConnection_.Clone(); feedDevices_ = other.feedDevices_.Clone(); fetchDevices_ = other.fetchDevices_.Clone(); fetchSkipSync_ = other.fetchSkipSync_; _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public CallableOptions Clone() { return new CallableOptions(this); } /// Field number for the "feed" field. public const int FeedFieldNumber = 1; private static readonly pb::FieldCodec _repeated_feed_codec = pb::FieldCodec.ForString(10); private readonly pbc::RepeatedField feed_ = new pbc::RepeatedField(); /// /// Tensors to be fed in the callable. Each feed is the name of a tensor. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField Feed { get { return feed_; } } /// Field number for the "fetch" field.
// CallableOptions fields: repeated string fetch (field 2, tag 18) and target
// (field 3, tag 26), run_options (field 4, RunOptions message), and repeated
// tensor_connection (field 5, message codec tag 42).
public const int FetchFieldNumber = 2; private static readonly pb::FieldCodec _repeated_fetch_codec = pb::FieldCodec.ForString(18); private readonly pbc::RepeatedField fetch_ = new pbc::RepeatedField(); /// /// Fetches. A list of tensor names. The caller of the callable expects a /// tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The /// order of specified fetches does not change the execution order. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField Fetch { get { return fetch_; } } /// Field number for the "target" field. public const int TargetFieldNumber = 3; private static readonly pb::FieldCodec _repeated_target_codec = pb::FieldCodec.ForString(26); private readonly pbc::RepeatedField target_ = new pbc::RepeatedField(); /// /// Target Nodes. A list of node names. The named nodes will be run by the /// callable but their outputs will not be returned. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField Target { get { return target_; } } /// Field number for the "run_options" field. public const int RunOptionsFieldNumber = 4; private global::Tensorflow.RunOptions runOptions_; /// /// Options that will be applied to each run. /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public global::Tensorflow.RunOptions RunOptions { get { return runOptions_; } set { runOptions_ = value; } } /// Field number for the "tensor_connection" field. public const int TensorConnectionFieldNumber = 5; private static readonly pb::FieldCodec _repeated_tensorConnection_codec = pb::FieldCodec.ForMessage(42, global::Tensorflow.TensorConnection.Parser); private readonly pbc::RepeatedField tensorConnection_ = new pbc::RepeatedField(); /// /// Tensors to be connected in the callable. Each TensorConnection denotes /// a pair of tensors in the graph, between which an edge will be created /// in the callable.
// CallableOptions feed_devices map field (field 6, map codec: string key tag 10,
// string value tag 18, entry tag 50), with the generated string->string map
// backing field, plus the long generated doc comment describing device-memory
// backed feeds/fetches.
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::RepeatedField TensorConnection { get { return tensorConnection_; } } /// Field number for the "feed_devices" field. public const int FeedDevicesFieldNumber = 6; private static readonly pbc::MapField.Codec _map_feedDevices_codec = new pbc::MapField.Codec(pb::FieldCodec.ForString(10), pb::FieldCodec.ForString(18), 50); private readonly pbc::MapField feedDevices_ = new pbc::MapField(); /// /// The Tensor objects fed in the callable and fetched from the callable /// are expected to be backed by host (CPU) memory by default. /// /// The options below allow changing that - feeding tensors backed by /// device memory, or returning tensors that are backed by device memory. /// /// The maps below map the name of a feed/fetch tensor (which appears in /// 'feed' or 'fetch' fields above), to the fully qualified name of the device /// owning the memory backing the contents of the tensor. /// /// For example, creating a callable with the following options: /// /// CallableOptions { /// feed: "a:0" /// feed: "b:0" /// /// fetch: "x:0" /// fetch: "y:0" /// /// feed_devices: { /// "a:0": "/job:localhost/replica:0/task:0/device:GPU:0" /// } /// /// fetch_devices: { /// "y:0": "/job:localhost/replica:0/task:0/device:GPU:0" /// } /// } /// /// means that the Callable expects: /// - The first argument ("a:0") is a Tensor backed by GPU memory. /// - The second argument ("b:0") is a Tensor backed by host memory. /// and of its return values: /// - The first output ("x:0") will be backed by host memory. /// - The second output ("y:0") will be backed by GPU memory. /// /// FEEDS: /// It is the responsibility of the caller to ensure that the memory of the fed /// tensors will be correctly initialized and synchronized before it is /// accessed by operations executed during the call to Session::RunCallable().
// FeedDevices accessor; fetch_devices map field (field 7, entry tag 58) with
// its codec and backing map; fetch_skip_sync bool field (field 8) declaration
// and its generated doc comment.
/// /// This is typically ensured by using the TensorFlow memory allocators /// (Device::GetAllocator()) to create the Tensor to be fed. /// /// Alternatively, for CUDA-enabled GPU devices, this typically means that the /// operation that produced the contents of the tensor has completed, i.e., the /// CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or /// cuStreamSynchronize()). /// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::MapField FeedDevices { get { return feedDevices_; } } /// Field number for the "fetch_devices" field. public const int FetchDevicesFieldNumber = 7; private static readonly pbc::MapField.Codec _map_fetchDevices_codec = new pbc::MapField.Codec(pb::FieldCodec.ForString(10), pb::FieldCodec.ForString(18), 58); private readonly pbc::MapField fetchDevices_ = new pbc::MapField(); [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public pbc::MapField FetchDevices { get { return fetchDevices_; } } /// Field number for the "fetch_skip_sync" field. public const int FetchSkipSyncFieldNumber = 8; private bool fetchSkipSync_; /// /// By default, RunCallable() will synchronize the GPU stream before returning /// fetched tensors on a GPU device, to ensure that the values in those tensors /// have been produced. This simplifies interacting with the tensors, but /// potentially incurs a performance hit. /// /// If this options is set to true, the caller is responsible for ensuring /// that the values in the fetched tensors have been produced before they are /// used. The caller can do this by invoking `Device::Sync()` on the underlying /// device(s), or by feeding the tensors back to the same Session using /// `feed_devices` with the same corresponding device name.
// FetchSkipSync accessor; value-equality over all eight fields plus unknown
// fields (maps compared via MapField.Equals); ToString; start of WriteTo
// (raw tag 34 for run_options).
/// [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool FetchSkipSync { get { return fetchSkipSync_; } set { fetchSkipSync_ = value; } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override bool Equals(object other) { return Equals(other as CallableOptions); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public bool Equals(CallableOptions other) { if (ReferenceEquals(other, null)) { return false; } if (ReferenceEquals(other, this)) { return true; } if(!feed_.Equals(other.feed_)) return false; if(!fetch_.Equals(other.fetch_)) return false; if(!target_.Equals(other.target_)) return false; if (!object.Equals(RunOptions, other.RunOptions)) return false; if(!tensorConnection_.Equals(other.tensorConnection_)) return false; if (!FeedDevices.Equals(other.FeedDevices)) return false; if (!FetchDevices.Equals(other.FetchDevices)) return false; if (FetchSkipSync != other.FetchSkipSync) return false; return Equals(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override int GetHashCode() { int hash = 1; hash ^= feed_.GetHashCode(); hash ^= fetch_.GetHashCode(); hash ^= target_.GetHashCode(); if (runOptions_ != null) hash ^= RunOptions.GetHashCode(); hash ^= tensorConnection_.GetHashCode(); hash ^= FeedDevices.GetHashCode(); hash ^= FetchDevices.GetHashCode(); if (FetchSkipSync != false) hash ^= FetchSkipSync.GetHashCode(); if (_unknownFields != null) { hash ^= _unknownFields.GetHashCode(); } return hash; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public override string ToString() { return pb::JsonFormatter.ToDiagnosticString(this); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void WriteTo(pb::CodedOutputStream output) { feed_.WriteTo(output, _repeated_feed_codec); fetch_.WriteTo(output, _repeated_fetch_codec); target_.WriteTo(output, _repeated_target_codec); if (runOptions_ != null) { output.WriteRawTag(34);
// Rest of WriteTo (repeated/map codec writes, bool with raw tag 64 only when
// true); CalculateSize; message-level MergeFrom (appends repeated/map entries,
// deep-merges run_options, bool overwritten only when set); start of stream
// MergeFrom with unknown-field preservation.
output.WriteMessage(RunOptions); } tensorConnection_.WriteTo(output, _repeated_tensorConnection_codec); feedDevices_.WriteTo(output, _map_feedDevices_codec); fetchDevices_.WriteTo(output, _map_fetchDevices_codec); if (FetchSkipSync != false) { output.WriteRawTag(64); output.WriteBool(FetchSkipSync); } if (_unknownFields != null) { _unknownFields.WriteTo(output); } } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public int CalculateSize() { int size = 0; size += feed_.CalculateSize(_repeated_feed_codec); size += fetch_.CalculateSize(_repeated_fetch_codec); size += target_.CalculateSize(_repeated_target_codec); if (runOptions_ != null) { size += 1 + pb::CodedOutputStream.ComputeMessageSize(RunOptions); } size += tensorConnection_.CalculateSize(_repeated_tensorConnection_codec); size += feedDevices_.CalculateSize(_map_feedDevices_codec); size += fetchDevices_.CalculateSize(_map_fetchDevices_codec); if (FetchSkipSync != false) { size += 1 + 1; } if (_unknownFields != null) { size += _unknownFields.CalculateSize(); } return size; } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(CallableOptions other) { if (other == null) { return; } feed_.Add(other.feed_); fetch_.Add(other.fetch_); target_.Add(other.target_); if (other.runOptions_ != null) { if (runOptions_ == null) { runOptions_ = new global::Tensorflow.RunOptions(); } RunOptions.MergeFrom(other.RunOptions); } tensorConnection_.Add(other.tensorConnection_); feedDevices_.Add(other.feedDevices_); fetchDevices_.Add(other.fetchDevices_); if (other.FetchSkipSync != false) { FetchSkipSync = other.FetchSkipSync; } _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields); } [global::System.Diagnostics.DebuggerNonUserCodeAttribute] public void MergeFrom(pb::CodedInputStream input) { uint tag; while ((tag = input.ReadTag()) != 0) { switch(tag) { default: _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input); break; case 10: {
// Stream MergeFrom tag dispatch for fields 1-8 (wire tags 10/18/26/34/42/50/58/64);
// closes CallableOptions, the Tensorflow namespace, and the designer-generated region.
feed_.AddEntriesFrom(input, _repeated_feed_codec); break; } case 18: { fetch_.AddEntriesFrom(input, _repeated_fetch_codec); break; } case 26: { target_.AddEntriesFrom(input, _repeated_target_codec); break; } case 34: { if (runOptions_ == null) { runOptions_ = new global::Tensorflow.RunOptions(); } input.ReadMessage(runOptions_); break; } case 42: { tensorConnection_.AddEntriesFrom(input, _repeated_tensorConnection_codec); break; } case 50: { feedDevices_.AddEntriesFrom(input, _map_feedDevices_codec); break; } case 58: { fetchDevices_.AddEntriesFrom(input, _map_fetchDevices_codec); break; } case 64: { FetchSkipSync = input.ReadBool(); break; } } } } } #endregion } #endregion Designer generated code