@@ -9,6 +9,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples", "t
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\TensorFlowNET.Core\TensorFlowNET.Core.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
 EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowNET.Utility", "src\TensorFlowNET.Utility\TensorFlowNET.Utility.csproj", "{00D9085C-0FC7-453C-A0CC-BAD98F44FEA0}"
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Any CPU = Debug|Any CPU
@@ -27,6 +29,10 @@ Global
 		{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
 		{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
 		{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
+		{00D9085C-0FC7-453C-A0CC-BAD98F44FEA0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{00D9085C-0FC7-453C-A0CC-BAD98F44FEA0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{00D9085C-0FC7-453C-A0CC-BAD98F44FEA0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{00D9085C-0FC7-453C-A0CC-BAD98F44FEA0}.Release|Any CPU.Build.0 = Release|Any CPU
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
@@ -0,0 +1,23 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow
+{
+    public static partial class tf
+    {
+        /// <summary>
+        /// Inserts a dimension of 1 into a tensor's shape.
+        /// </summary>
+        /// <param name="input">The tensor whose shape is expanded.</param>
+        /// <param name="axis">The index at which to insert the new size-1 dimension.</param>
+        /// <param name="name">Optional name for the operation.</param>
+        /// <param name="dim">Deprecated alias for <paramref name="axis"/>.</param>
+        /// <returns>
+        /// A `Tensor` with the same data as `input`, but its shape has an additional
+        /// dimension of size 1 added.
+        /// </returns>
+        public static Tensor expand_dims(Tensor input, int axis = -1, string name = "", int dim = -1)
+            => array_ops.expand_dims(input, axis, name, dim);
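+
+        // Example: for a tensor `t` of shape [3],
+        //   tf.expand_dims(t, 0) has shape [1, 3]
+        //   tf.expand_dims(t, 1) has shape [3, 1]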
+    }
+}
@@ -0,0 +1,13 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow
+{
+    public static partial class tf
+    {
+        public static Tensor read_file(string filename, string name = "") => gen_io_ops.read_file(filename, name);
+
+        public static gen_image_ops image => new gen_image_ops();
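+
+        // Example: read and decode an image file into a uint8 tensor.
+        //   var contents = tf.read_file("sample.jpg");
+        //   var img = tf.image.decode_jpeg(contents, channels: 3);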
+    }
+}
@@ -6,9 +6,6 @@ namespace Tensorflow
 {
     public static partial class tf
     {
-        public static unsafe Tensor matmul(Tensor a, Tensor b)
-        {
-            return gen_math_ops.mat_mul(a, b);
-        }
+        public static unsafe Tensor matmul(Tensor a, Tensor b) => gen_math_ops.mat_mul(a, b);
     }
 }
@@ -6,25 +6,19 @@ namespace Tensorflow
 {
     public static partial class tf
    {
-        public static Tensor add(Tensor a, Tensor b)
-        {
-            return gen_math_ops.add(a, b);
-        }
-
-        public static Tensor sub(Tensor a, Tensor b)
-        {
-            return gen_math_ops.sub(a, b);
-        }
-
-        public static Tensor multiply(Tensor x, Tensor y)
-        {
-            return gen_math_ops.mul(x, y);
-        }
-
-        public static Tensor pow(Tensor x, double y)
-        {
-            return gen_math_ops.pow(x, y);
-        }
+        public static Tensor add(Tensor a, Tensor b) => gen_math_ops.add(a, b);
+
+        public static Tensor sub(Tensor a, Tensor b) => gen_math_ops.sub(a, b);
+
+        public static Tensor subtract<T>(Tensor x, T[] y, string name = "") where T : struct
+            => gen_math_ops.sub(x, ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"), name);
+
+        public static Tensor multiply(Tensor x, Tensor y) => gen_math_ops.mul(x, y);
+
+        public static Tensor divide<T>(Tensor x, T[] y, string name = "") where T : struct
+            => x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y");
+
+        public static Tensor pow(Tensor x, double y) => gen_math_ops.pow(x, y);
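+
+        // Example: the array overloads broadcast a one-element operand against
+        // the tensor, computing (x - mean) / std element-wise:
+        //   var normalized = tf.divide(tf.subtract(x, new float[] { 0.5f }), new float[] { 255f });
+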
         /// <summary>
         /// Computes the sum of elements across dimensions of a tensor.
@@ -32,9 +26,9 @@ namespace Tensorflow
         /// <param name="input"></param>
         /// <param name="axis"></param>
         /// <returns></returns>
-        public static Tensor reduce_sum(Tensor input, int[] axis = null)
-        {
-            return math_ops.reduce_sum(input);
-        }
+        public static Tensor reduce_sum(Tensor input, int[] axis = null) => math_ops.reduce_sum(input);
+
+        public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = "")
+            => math_ops.cast(x, dtype, name);
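+
+        // Example: cast a uint8 image tensor to float32 before doing arithmetic:
+        //   var floats = tf.cast(image_tensor, tf.float32);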
     }
 }
@@ -28,8 +28,8 @@
             var graph = ops.get_default_graph();
             Python.with<ops.name_scope>(new ops.name_scope(name, "import", input_map.Values), scope =>
             {
-                /*prefix = scope;
-                if (!string.IsNullOrEmpty(prefix))
+                prefix = scope;
+                /*if (!string.IsNullOrEmpty(prefix))
                     prefix = prefix.Substring(0, prefix.Length - 1);
                 else
                     prefix = "";*/
@@ -113,9 +113,16 @@
                 var key = attr_def.Name;
                 if(attr_def.DefaultValue != null)
                 {
-                    var value = node_def.Attr[key];
-                    if (value == null)
+                    if (node_def.Attr.ContainsKey(key))
+                    {
+                        var value = node_def.Attr[key];
+                        if (value == null)
+                            node_def.Attr[key] = attr_def.DefaultValue;
+                    }
+                    else
+                    {
                         node_def.Attr[key] = attr_def.DefaultValue;
+                    }
                 }
             }
         }
@@ -24,6 +24,9 @@
             return c_api.TF_NewOperation(_handle, opType, opName);
         }
 
+        public Operation get_operation_by_name(string name)
+            => as_graph_element(name, allow_tensor: false, allow_operation: true) as Operation;
+
         public ITensorOrOperation _get_operation_by_name_unsafe(string name)
         {
             return _nodes_by_name.ContainsKey(name) ? _nodes_by_name[name] : null;
@@ -46,6 +46,10 @@
             }
         }
 
+        public static Tensor expand_dims(Tensor input, int axis = -1, string name = "", int dim = -1) => expand_dims_v2(input, axis, name);
+
+        private static Tensor expand_dims_v2(Tensor input, int axis, string name = "") => gen_array_ops.expand_dims(input, axis, name);
+
         public static Tensor rank(Tensor input, string name = "")
         {
             return math_ops.rank_internal(input, name, optimize: true);
@@ -12,6 +12,13 @@
         public static OpDefLibrary _op_def_lib = new OpDefLibrary();
         public static Execute _execute = new Execute();
 
+        public static Tensor expand_dims(Tensor input, int axis, string name = "")
+        {
+            var _op = _op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis });
+
+            return _op.outputs[0];
+        }
+
         public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = "")
         {
             var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
@@ -0,0 +1,61 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow
+{
+    public class gen_image_ops
+    {
+        public static OpDefLibrary _op_def_lib = new OpDefLibrary();
+
+        public Tensor decode_jpeg(Tensor contents,
+            int channels = 0,
+            int ratio = 1,
+            bool fancy_upscaling = true,
+            bool try_recover_truncated = false,
+            float acceptable_fraction = 1,
+            string dct_method = "",
+            string name = "")
+        {
+            // Add nodes to the TensorFlow graph.
+            if (tf.context.executing_eagerly())
+            {
+                throw new NotImplementedException("decode_jpeg");
+            }
+            else
+            {
+                var _op = _op_def_lib._apply_op_helper("DecodeJpeg", name: name, args: new
+                {
+                    contents,
+                    channels,
+                    ratio,
+                    fancy_upscaling,
+                    try_recover_truncated,
+                    acceptable_fraction,
+                    dct_method
+                });
+
+                return _op.outputs[0];
+            }
+        }
+
+        public Tensor resize_bilinear(Tensor images, int[] size, bool align_corners = false, string name = "")
+        {
+            if (tf.context.executing_eagerly())
+            {
+                throw new NotImplementedException("resize_bilinear");
+            }
+            else
+            {
+                var _op = _op_def_lib._apply_op_helper("ResizeBilinear", name: name, args: new
+                {
+                    images,
+                    size,
+                    align_corners
+                });
+
+                return _op.outputs[0];
+            }
+        }
+    }
+}
@@ -21,5 +21,12 @@
 
             return _op.outputs;
         }
+
+        public static Tensor read_file(string filename, string name = "")
+        {
+            var _op = _op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename });
+
+            return _op.outputs[0];
+        }
     }
 }
@@ -17,6 +17,13 @@
             return _op.outputs[0];
         }
 
+        public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string name = "")
+        {
+            var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
+
+            return _op.outputs[0];
+        }
+
         public static Tensor neg(Tensor x, string name = "")
         {
             var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x });
@@ -4,7 +4,7 @@ using System.Text;
 
 namespace Tensorflow
 {
-    public class math_ops
+    public class math_ops : Python
     {
         public static Tensor add(Tensor x, Tensor y, string name = "") => gen_math_ops.add(x, y, name);
@@ -14,7 +14,14 @@
             if(base_type == x.dtype)
                 return x;
 
-            throw new NotImplementedException("math_ops.cast");
+            return with<ops.name_scope, Tensor>(new ops.name_scope(name, "Cast", new { x }), scope =>
+            {
+                x = ops.convert_to_tensor(x, name: "x");
+                if (x.dtype.as_base_dtype() != base_type)
+                    x = gen_math_ops.cast(x, base_type, name: name);
+
+                return x;
+            });
         }
 
         /// <summary>
@@ -35,8 +35,7 @@
             c_api.TF_DeleteSessionOptions(opts);
         }
 
-        public virtual NDArray run(object fetches, FeedItem[] feed_dict = null)
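+        // feed_dict is now a params array, so call sites can pass FeedItems
+        // directly: sess.run(fetch, new FeedItem(x, 1), new FeedItem(y, 2));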
+        public virtual NDArray run(object fetches, params FeedItem[] feed_dict)
         {
             return _run(fetches, feed_dict);
         }
@@ -62,6 +61,9 @@
                 var subfeed_dtype = subfeed_t.dtype.as_numpy_datatype();
                 switch (subfeed_val)
                 {
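+                    // NDArray values are fed through as-is, without scalar conversion.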
+                    case NDArray nd:
+                        feed_dict_tensor[subfeed_t] = nd;
+                        break;
                     case float floatVal:
                         feed_dict_tensor[subfeed_t] = (NDArray)floatVal;
                         break;
@@ -193,25 +195,25 @@
                     case TF_DataType.TF_INT16:
                         var shorts = new short[tensor.size];
                         for (ulong i = 0; i < tensor.size; i++)
-                            shorts[i] = *(short*)(c_api.TF_TensorData(output) + (int)(tensor.dataTypeSize * i));
+                            shorts[i] = *(short*)(c_api.TF_TensorData(output) + (int)(tensor.itemsize * i));
                         nd = np.array(shorts).reshape(ndims);
                         break;
                     case TF_DataType.TF_INT32:
                         var ints = new int[tensor.size];
                         for (ulong i = 0; i < tensor.size; i++)
-                            ints[i] = *(int*)(c_api.TF_TensorData(output) + (int)(tensor.dataTypeSize * i));
+                            ints[i] = *(int*)(c_api.TF_TensorData(output) + (int)(tensor.itemsize * i));
                         nd = np.array(ints).reshape(ndims);
                         break;
                     case TF_DataType.TF_FLOAT:
                         var floats = new float[tensor.size];
                         for (ulong i = 0; i < tensor.size; i++)
-                            floats[i] = *(float*)(c_api.TF_TensorData(output) + (int)(tensor.dataTypeSize * i));
+                            floats[i] = *(float*)(c_api.TF_TensorData(output) + (int)(tensor.itemsize * i));
                         nd = np.array(floats).reshape(ndims);
                         break;
                     case TF_DataType.TF_DOUBLE:
                         var doubles = new double[tensor.size];
                         for (ulong i = 0; i < tensor.size; i++)
-                            doubles[i] = *(double*)(c_api.TF_TensorData(output) + (int)(tensor.dataTypeSize * i));
+                            doubles[i] = *(double*)(c_api.TF_TensorData(output) + (int)(tensor.itemsize * i));
                         nd = np.array(doubles).reshape(ndims);
                         break;
                     default:
@@ -35,8 +35,8 @@
         private TF_DataType _dtype = TF_DataType.DtInvalid;
         public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle);
         public ulong bytesize => _handle == IntPtr.Zero ? 0 : c_api.TF_TensorByteSize(_handle);
-        public ulong dataTypeSize => _handle == IntPtr.Zero ? 0 : c_api.TF_DataTypeSize(dtype);
-        public ulong size => _handle == IntPtr.Zero ? 0 : bytesize / dataTypeSize;
+        public ulong itemsize => _handle == IntPtr.Zero ? 0 : c_api.TF_DataTypeSize(dtype);
+        public ulong size => _handle == IntPtr.Zero ? 0 : bytesize / itemsize;
         public IntPtr buffer => _handle == IntPtr.Zero ? IntPtr.Zero : c_api.TF_TensorData(_handle);
         public int num_consumers(TF_Output oper_out) => _handle == IntPtr.Zero ? 0 : c_api.TF_OperationOutputNumConsumers(oper_out);
@@ -122,7 +122,7 @@
             for (ulong i = 0; i < size; i++)
             {
-                data[i] = Marshal.PtrToStructure<T>(buffer + (int)(i * dataTypeSize));
+                data[i] = Marshal.PtrToStructure<T>(buffer + (int)(i * itemsize));
             }
 
             return data;
@@ -21,7 +21,7 @@
                 case TF_DataType.TF_STRING:
                     return typeof(string);
                 default:
-                    throw new NotImplementedException("as_numpy_datatype failed");
+                    return null;
             }
         }
@@ -44,6 +44,7 @@
             // We first convert value to a numpy array or scalar.
             NDArray nparray = null;
+            var np_dt = dtype.as_numpy_datatype();
 
             if (values is NDArray nd)
             {
@@ -54,31 +55,61 @@
                 if (values == null)
                     throw new ValueError("None values not supported.");
 
-                switch (values)
+                if (np_dt == null)
                 {
-                    case bool boolVal:
-                        nparray = boolVal;
-                        break;
-                    case int intVal:
-                        nparray = intVal;
-                        break;
-                    case int[] intVals:
-                        nparray = np.array(intVals);
-                        break;
-                    case float floatVal:
-                        nparray = floatVal;
-                        break;
-                    case double doubleVal:
-                        nparray = doubleVal;
-                        break;
-                    case string strVal:
-                        nparray = strVal;
-                        break;
-                    case string[] strVals:
-                        nparray = strVals;
-                        break;
-                    default:
-                        throw new Exception("make_tensor_proto Not Implemented");
+                    switch (values)
+                    {
+                        case bool boolVal:
+                            nparray = boolVal;
+                            break;
+                        case int intVal:
+                            nparray = intVal;
+                            break;
+                        case int[] intVals:
+                            nparray = np.array(intVals);
+                            break;
+                        case float floatVal:
+                            nparray = floatVal;
+                            break;
+                        case double doubleVal:
+                            nparray = doubleVal;
+                            break;
+                        case string strVal:
+                            nparray = strVal;
+                            break;
+                        case string[] strVals:
+                            nparray = strVals;
+                            break;
+                        default:
+                            throw new NotImplementedException("make_tensor_proto Not Implemented");
+                    }
+                }
+                else
+                {
+                    // convert data type
+                    switch (np_dt.Name)
+                    {
+                        case "Int32":
+                            if (values.GetType().IsArray)
+                                nparray = np.array((int[])values, np_dt);
+                            else
+                                nparray = (int)values;
+                            break;
+                        case "Single":
+                            if (values.GetType().IsArray)
+                                nparray = np.array((float[])values, np_dt);
+                            else
+                                nparray = (float)values;
+                            break;
+                        case "Double":
+                            nparray = (double)values;
+                            break;
+                        case "String":
+                            nparray = values.ToString();
+                            break;
+                        default:
+                            throw new NotImplementedException("make_tensor_proto Not Implemented");
+                    }
                 }
             }
@@ -169,9 +169,9 @@
             if (!_is_empty)
             {
-                model_checkpoint_path = sess.run(_saver_def.SaveTensorName, new FeedItem[] {
+                model_checkpoint_path = sess.run(_saver_def.SaveTensorName,
                     new FeedItem(_saver_def.FilenameTensorName, checkpoint_file)
-                });
+                );
 
                 if (write_state)
                 {
@@ -227,10 +227,8 @@
             if (tf.context.executing_eagerly())
                 ;
             else
-                sess.run(_saver_def.RestoreOpName, new FeedItem[]
-                {
-                    new FeedItem(_saver_def.FilenameTensorName, save_path)
-                });
+                sess.run(_saver_def.RestoreOpName,
+                    new FeedItem(_saver_def.FilenameTensorName, save_path));
         }
 
         /// <summary>
@@ -418,6 +418,9 @@
             string name = "", TF_DataType preferred_dtype = TF_DataType.DtInvalid,
             bool as_ref = false)
         {
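+            // Fall back to the caller's preferred dtype when no explicit dtype is given.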
+            if (dtype == TF_DataType.DtInvalid)
+                dtype = preferred_dtype;
+
             switch (value)
             {
                 case Tensor tensor:
@@ -432,6 +435,8 @@
                     return constant_op.constant(intArray, dtype: dtype, name: name);
                 case float floatVal:
                     return constant_op.constant(floatVal, dtype: dtype, name: name);
+                case float[] floatArray:
+                    return constant_op.constant(floatArray, dtype: dtype, name: name);
                 case double doubleVal:
                     return constant_op.constant(doubleVal, dtype: dtype, name: name);
                 case RefVariable varVal:
@@ -0,0 +1,37 @@
+using ICSharpCode.SharpZipLib.GZip;
+using ICSharpCode.SharpZipLib.Tar;
+using System;
+using System.IO;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace TensorFlowNET.Utility
+{
+    public class Compress
+    {
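+        /// <summary>
+        /// Extracts a .tar.gz archive into <paramref name="destFolder"/>, printing
+        /// a dot every 200 ms while the background extraction task runs.
+        /// </summary>
+        /// <example>Compress.ExtractTGZ("model.tar.gz", "model_dir");</example>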
+        public static void ExtractTGZ(String gzArchiveName, String destFolder)
+        {
+            Console.WriteLine("Extracting.");
+            var task = Task.Run(() =>
+            {
+                using (var inStream = File.OpenRead(gzArchiveName))
+                {
+                    using (var gzipStream = new GZipInputStream(inStream))
+                    {
+                        using (TarArchive tarArchive = TarArchive.CreateInputTarArchive(gzipStream))
+                            tarArchive.ExtractContents(destFolder);
+                    }
+                }
+            });
+
+            while (!task.IsCompleted)
+            {
+                Thread.Sleep(200);
+                Console.Write(".");
+            }
+
+            Console.WriteLine("");
+            Console.WriteLine("Extracting is completed.");
+        }
+    }
+}
@@ -0,0 +1,13 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>netstandard2.0</TargetFramework>
+    <AssemblyName>TensorFlowNET.Utility</AssemblyName>
+    <RootNamespace>TensorFlowNET.Utility</RootNamespace>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <PackageReference Include="SharpZipLib" Version="1.1.0" />
+  </ItemGroup>
+
+</Project>
@@ -0,0 +1,35 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Net;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace TensorFlowNET.Utility
+{
+    public class Web
+    {
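+        /// <summary>
+        /// Downloads <paramref name="url"/> to <paramref name="file"/> unless the
+        /// file already exists; returns true only when a download actually ran.
+        /// </summary>
+        /// <example>Web.Download("https://example.org/data.zip", "data.zip");</example>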
+        public static bool Download(string url, string file)
+        {
+            if (File.Exists(file))
+            {
+                Console.WriteLine($"{file} already exists.");
+                return false;
+            }
+
+            var wc = new WebClient();
+            Console.WriteLine($"Downloading {file}");
+            var download = Task.Run(() => wc.DownloadFile(url, file));
+            while (!download.IsCompleted)
+            {
+                Thread.Sleep(1000);
+                Console.Write(".");
+            }
+            Console.WriteLine("");
+            Console.WriteLine($"Downloaded {file}");
+            return true;
+        }
+    }
+}
@@ -0,0 +1,120 @@
+using ICSharpCode.SharpZipLib.GZip;
+using ICSharpCode.SharpZipLib.Tar;
+using NumSharp.Core;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Net;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Tensorflow;
+
+namespace TensorFlowNET.Examples
+{
+    /// <summary>
+    /// Port from tensorflow\examples\label_image\label_image.py
+    /// </summary>
+    public class LabelImage : Python, IExample
+    {
+        string dir = "label_image_data";
+        string pbFile = "inception_v3_2016_08_28_frozen.pb";
+        string labelFile = "imagenet_slim_labels.txt";
+        string picFile = "grace_hopper.jpg";
+        int input_height = 299;
+        int input_width = 299;
+        int input_mean = 0;
+        int input_std = 255;
+        string input_layer = "input";
+        string output_layer = "InceptionV3/Predictions/Reshape_1";
+
+        public void Run()
+        {
+            PrepareData();
+
+            var graph = LoadGraph(Path.Join(dir, pbFile));
+
+            var t = ReadTensorFromImageFile(Path.Join(dir, picFile),
+                input_height: input_height,
+                input_width: input_width,
+                input_mean: input_mean,
+                input_std: input_std);
+
+            var input_name = "import/" + input_layer;
+            var output_name = "import/" + output_layer;
+
+            var input_operation = graph.get_operation_by_name(input_name);
+            var output_operation = graph.get_operation_by_name(output_name);
+
+            NDArray results = null;
+            with<Session>(tf.Session(graph), sess =>
+            {
+                results = sess.run(output_operation.outputs[0], new FeedItem(input_operation.outputs[0], t));
+            });
+
+            // equivalent to np.squeeze: drop size-1 dimensions
+            results = results.reshape(results.shape.Where(x => x > 1).ToArray());
+
+            // top_k = results.argsort()[-5:][::-1]
+            // NOTE: argsort is not ported yet; this simply takes the first five entries.
+            var top_k = results.Data<int>().Take(5).ToArray();
+
+            var labels = LoadLabels(Path.Join(dir, labelFile));
+            foreach (var i in top_k)
+                Console.WriteLine($"{labels[i]}, {results[i]}");
+        }
+
+        private string[] LoadLabels(string file)
+        {
+            return File.ReadAllLines(file);
+        }
+
+        private Graph LoadGraph(string modelFile)
+        {
+            var graph = tf.Graph();
+            var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(modelFile));
+            importer.import_graph_def(graph_def);
+            return graph;
+        }
+
+        private NDArray ReadTensorFromImageFile(string file_name,
+            int input_height = 299,
+            int input_width = 299,
+            int input_mean = 0,
+            int input_std = 255)
+        {
+            string input_name = "file_reader";
+            string output_name = "normalized";
+            Tensor image_reader = null;
+
+            var file_reader = tf.read_file(file_name, input_name);
+            image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
+            var float_caster = tf.cast(image_reader, tf.float32);
+            var dims_expander = tf.expand_dims(float_caster, 0);
+            var resized = tf.image.resize_bilinear(dims_expander, new int[] { input_height, input_width });
+            var normalized = tf.divide(tf.subtract(resized, new float[] { input_mean }), new float[] { input_std });
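+            // scale pixels to roughly [0, 1]: (x - input_mean) / input_std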
+
+            return with<Session, NDArray>(tf.Session(), sess =>
+            {
+                var result = sess.run(normalized);
+                return result;
+            });
+        }
+
+        private void PrepareData()
+        {
+            Directory.CreateDirectory(dir);
+
+            // get model file
+            string url = "https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz";
+            string zipFile = Path.Join(dir, $"{pbFile}.tar.gz");
+            Utility.Web.Download(url, zipFile);
+            if (!File.Exists(Path.Join(dir, pbFile)))
+                Utility.Compress.ExtractTGZ(zipFile, dir);
+
+            // download sample picture
+            string pic = "grace_hopper.jpg";
+            Utility.Web.Download($"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/label_image/data/{pic}", Path.Join(dir, pic));
+        }
+    }
+}
@@ -65,21 +65,17 @@ namespace TensorFlowNET.Examples
                 {
                     foreach (var (x, y) in Python.zip<double>(train_X, train_Y))
                     {
-                        sess.run(optimizer, feed_dict: new FeedItem[]
-                        {
+                        sess.run(optimizer,
                             new FeedItem(X, x),
-                            new FeedItem(Y, y)
-                        });
+                            new FeedItem(Y, y));
                     }
 
                     // Display logs per epoch step
                     if ((epoch + 1) % display_step == 0)
                     {
-                        var c = sess.run(cost, feed_dict: new FeedItem[]
-                        {
+                        var c = sess.run(cost,
                             new FeedItem(X, train_X),
-                            new FeedItem(Y, train_Y)
-                        });
+                            new FeedItem(Y, train_Y));
                         var rW = sess.run(W);
                         Console.WriteLine($"Epoch: {epoch + 1} cost={c} " +
                             $"W={rW} b={sess.run(b)}");
@@ -12,6 +12,7 @@
   <ItemGroup>
     <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
+    <ProjectReference Include="..\..\src\TensorFlowNET.Utility\TensorFlowNET.Utility.csproj" />
   </ItemGroup>
 
 </Project>
@@ -33,11 +33,9 @@ namespace TensorFlowNET.UnitTest
             using(var sess = tf.Session())
             {
-                var o = sess.run(c, feed_dict: new FeedItem[]
-                {
+                var o = sess.run(c,
                     new FeedItem(a, 3.0f),
-                    new FeedItem(b, 2.0f)
-                });
+                    new FeedItem(b, 2.0f));
                 Assert.AreEqual((float)o, 5.0f);
             }
         }
@@ -17,10 +17,8 @@ namespace TensorFlowNET.UnitTest
             Python.with<Session>(tf.Session(), sess =>
             {
-                var result = sess.run(y, feed_dict: new FeedItem[]
-                {
-                    new FeedItem(x, 2)
-                });
+                var result = sess.run(y,
+                    new FeedItem(x, 2));
                 Assert.AreEqual((int)result, 6);
             });
         }