diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs
index 16bd48fd..2836c4dd 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tile.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs
@@ -1,4 +1,5 @@
-using System;
+using NumSharp.Core;
+using System;
using System.Collections.Generic;
using System.Text;
@@ -9,6 +10,9 @@ namespace Tensorflow
public static Tensor tile(Tensor input,
Tensor multiples,
string name = null) => gen_array_ops.tile(input, multiples, name);
+ public static Tensor tile(NDArray input,
+ int[] multiples,
+ string name = null) => gen_array_ops.tile(input, multiples, name);
}
}
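
A minimal usage sketch of the new NDArray overload (the np.array call mirrors the one used in the NaiveBayesClassifier example below; the resulting shape assumes standard tile semantics, and a graph/session is assumed to be set up as in that example):

    using NumSharp.Core;
    using Tensorflow;

    // Tile a (1, 3) array twice along rows and once along columns;
    // the resulting tensor should have shape (2, 3).
    var nd = np.array(new double[][] { new double[] { 1.0, 2.0, 3.0 } });
    Tensor tiled = tf.tile(nd, new int[] { 2, 1 });
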
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.py.cs b/src/TensorFlowNET.Core/Operations/array_ops.py.cs
index c5641f06..83acd21e 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.py.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.py.cs
@@ -224,7 +224,10 @@ namespace Tensorflow
///
/// When building ops to compute gradients, this op prevents the contribution of
- /// its inputs to be taken into account.Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified 'loss' by recursively finding out inputs that contributed to its computation.If you insert this op in the graph it inputs are masked from the gradient generator. They are not
+ /// its inputs to be taken into account.Normally, the gradient generator adds ops
+ /// to a graph to compute the derivatives of a specified 'loss' by recursively
+ /// finding out inputs that contributed to its computation.If you insert this op
+ /// in the graph it inputs are masked from the gradient generator. They are not
/// taken into account for computing gradients.
///
///
diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
index de8cc9d5..f2c5841f 100644
--- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
@@ -1,4 +1,5 @@
-using System;
+using NumSharp.Core;
+using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
@@ -156,6 +157,11 @@ namespace Tensorflow
var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
return _op.outputs[0];
}
+ public static Tensor tile(NDArray input, int[] multiples, string name = null)
+ {
+ var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
+ return _op.outputs[0];
+ }
public static Tensor transpose(Tensor x, int[] perm, string name = null)
{
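
The NDArray overload hands the array straight to _apply_op_helper, which relies on the convert_to_tensor change further down in this diff to wrap it as a constant tensor. A roughly equivalent explicit form, for comparison (a sketch only, reusing the nd array from the tf.tile sketch above; it feeds the multiples as a constant NDArray and uses the Tensor-based overload that already exists):

    var input = ops.convert_to_tensor(nd, TF_DataType.TF_DOUBLE);
    var multiples = tf.constant(np.array(2, 1));
    Tensor tiled = gen_array_ops.tile(input, multiples);
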
diff --git a/src/TensorFlowNET.Core/Tensors/dtypes.cs b/src/TensorFlowNET.Core/Tensors/dtypes.cs
index 005b5df9..e010c432 100644
--- a/src/TensorFlowNET.Core/Tensors/dtypes.cs
+++ b/src/TensorFlowNET.Core/Tensors/dtypes.cs
@@ -47,7 +47,7 @@ namespace Tensorflow
dtype = TF_DataType.TF_STRING;
break;
default:
- throw new Exception("Not Implemented");
+ throw new Exception("as_dtype Not Implemented");
}
return dtype;
diff --git a/src/TensorFlowNET.Core/ops.py.cs b/src/TensorFlowNET.Core/ops.py.cs
index 50fd59c9..00f1add5 100644
--- a/src/TensorFlowNET.Core/ops.py.cs
+++ b/src/TensorFlowNET.Core/ops.py.cs
@@ -441,6 +441,8 @@ namespace Tensorflow
switch (value)
{
+ case NDArray nd:
+ return constant_op.constant(nd, dtype: dtype, name: name);
case Tensor tensor:
return tensor;
case string str:
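
With the new NDArray case, convert_to_tensor wraps NumSharp arrays as constant tensors instead of falling through the switch. A minimal sketch of what this enables (mirroring how the NaiveBayesClassifier change below converts its feature matrix):

    var nd = np.array(new double[][] { new double[] { 1.0, 2.0 } });
    // Now handled by the NDArray case and routed to constant_op.constant:
    var t = ops.convert_to_tensor(nd, TF_DataType.TF_DOUBLE);
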
diff --git a/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs b/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
index d3300519..5c4874b2 100644
--- a/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
+++ b/test/TensorFlowNET.Examples/NaiveBayesClassifier.cs
@@ -15,22 +15,87 @@ namespace TensorFlowNET.Examples
public Normal dist { get; set; }
public void Run()
{
- np.array(1.0f, 1.0f);
- var X = np.array(new float[][] { new float[] { 1.0f, 1.0f }, new float[] { 2.0f, 2.0f }, new float[] { -1.0f, -1.0f }, new float[] { -2.0f, -2.0f }, new float[] { 1.0f, -1.0f }, new float[] { 2.0f, -2.0f }, });
- var y = np.array(0,0,1,1,2,2);
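+ // Training data: 150 two-feature samples, 50 per class (labels follow below).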
+ var X = np.array(new double[][] { new double[] { 5.1, 3.5},new double[] { 4.9, 3.0 },new double[] { 4.7, 3.2 },
+ new double[] { 4.6, 3.1 },new double[] { 5.0, 3.6 },new double[] { 5.4, 3.9 },
+ new double[] { 4.6, 3.4 },new double[] { 5.0, 3.4 },new double[] { 4.4, 2.9 },
+ new double[] { 4.9, 3.1 },new double[] { 5.4, 3.7 },new double[] {4.8, 3.4 },
+ new double[] {4.8, 3.0 },new double[] {4.3, 3.0 },new double[] {5.8, 4.0 },
+ new double[] {5.7, 4.4 },new double[] {5.4, 3.9 },new double[] {5.1, 3.5 },
+ new double[] {5.7, 3.8 },new double[] {5.1, 3.8 },new double[] {5.4, 3.4 },
+ new double[] {5.1, 3.7 },new double[] {5.1, 3.3 },new double[] {4.8, 3.4 },
+ new double[] {5.0 , 3.0 },new double[] {5.0 , 3.4 },new double[] {5.2, 3.5 },
+ new double[] {5.2, 3.4 },new double[] {4.7, 3.2 },new double[] {4.8, 3.1 },
+ new double[] {5.4, 3.4 },new double[] {5.2, 4.1},new double[] {5.5, 4.2 },
+ new double[] {4.9, 3.1 },new double[] {5.0 , 3.2 },new double[] {5.5, 3.5 },
+ new double[] {4.9, 3.6 },new double[] {4.4, 3.0 },new double[] {5.1, 3.4 },
+ new double[] {5.0 , 3.5 },new double[] {4.5, 2.3 },new double[] {4.4, 3.2 },
+ new double[] {5.0 , 3.5 },new double[] {5.1, 3.8 },new double[] {4.8, 3.0},
+ new double[] {5.1, 3.8 },new double[] {4.6, 3.2 },new double[] { 5.3, 3.7 },
+ new double[] {5.0 , 3.3 },new double[] {7.0 , 3.2 },new double[] {6.4, 3.2 },
+ new double[] {6.9, 3.1 },new double[] {5.5, 2.3 },new double[] {6.5, 2.8 },
+ new double[] {5.7, 2.8 },new double[] {6.3, 3.3 },new double[] {4.9, 2.4 },
+ new double[] {6.6, 2.9 },new double[] {5.2, 2.7 },new double[] {5.0 , 2.0 },
+ new double[] {5.9, 3.0 },new double[] {6.0 , 2.2 },new double[] {6.1, 2.9 },
+ new double[] {5.6, 2.9 },new double[] {6.7, 3.1 },new double[] {5.6, 3.0 },
+ new double[] {5.8, 2.7 },new double[] {6.2, 2.2 },new double[] {5.6, 2.5 },
+ new double[] {5.9, 3.0},new double[] {6.1, 2.8},new double[] {6.3, 2.5},
+ new double[] {6.1, 2.8},new double[] {6.4, 2.9},new double[] {6.6, 3.0 },
+ new double[] {6.8, 2.8},new double[] {6.7, 3.0 },new double[] {6.0 , 2.9},
+ new double[] {5.7, 2.6},new double[] {5.5, 2.4},new double[] {5.5, 2.4},
+ new double[] {5.8, 2.7},new double[] {6.0 , 2.7},new double[] {5.4, 3.0 },
+ new double[] {6.0 , 3.4},new double[] {6.7, 3.1},new double[] {6.3, 2.3},
+ new double[] {5.6, 3.0 },new double[] {5.5, 2.5},new double[] {5.5, 2.6},
+ new double[] {6.1, 3.0 },new double[] {5.8, 2.6},new double[] {5.0 , 2.3},
+ new double[] {5.6, 2.7},new double[] {5.7, 3.0 },new double[] {5.7, 2.9},
+ new double[] {6.2, 2.9},new double[] {5.1, 2.5},new double[] {5.7, 2.8},
+ new double[] {6.3, 3.3},new double[] {5.8, 2.7},new double[] {7.1, 3.0 },
+ new double[] {6.3, 2.9},new double[] {6.5, 3.0 },new double[] {7.6, 3.0 },
+ new double[] {4.9, 2.5},new double[] {7.3, 2.9},new double[] {6.7, 2.5},
+ new double[] {7.2, 3.6},new double[] {6.5, 3.2},new double[] {6.4, 2.7},
+ new double[] {6.8, 3.00 },new double[] {5.7, 2.5},new double[] {5.8, 2.8},
+ new double[] {6.4, 3.2},new double[] {6.5, 3.0 },new double[] {7.7, 3.8},
+ new double[] {7.7, 2.6},new double[] {6.0 , 2.2},new double[] {6.9, 3.2},
+ new double[] {5.6, 2.8},new double[] {7.7, 2.8},new double[] {6.3, 2.7},
+ new double[] {6.7, 3.3},new double[] {7.2, 3.2},new double[] {6.2, 2.8},
+ new double[] {6.1, 3.0 },new double[] {6.4, 2.8},new double[] {7.2, 3.0 },
+ new double[] {7.4, 2.8},new double[] {7.9, 3.8},new double[] {6.4, 2.8},
+ new double[] {6.3, 2.8},new double[] {6.1, 2.6},new double[] {7.7, 3.0 },
+ new double[] {6.3, 3.4},new double[] {6.4, 3.1},new double[] {6.0, 3.0},
+ new double[] {6.9, 3.1},new double[] {6.7, 3.1},new double[] {6.9, 3.1},
+ new double[] {5.8, 2.7},new double[] {6.8, 3.2},new double[] {6.7, 3.3},
+ new double[] {6.7, 3.0 },new double[] {6.3, 2.5},new double[] {6.5, 3.0 },
+ new double[] {6.2, 3.4},new double[] {5.9, 3.0 }, new double[] {5.8, 3.0 }});
+
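+ // Class labels for the 150 samples above: 0, 1 and 2, 50 of each.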
+ var y = np.array(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
fit(X, y);
// Create a regular grid and classify each point
+ double x_min = (double) X.amin(0)[0] - 0.5;
+ double y_min = (double) X.amin(0)[1] - 0.5;
+ double x_max = (double) X.amax(0)[0] + 0.5;
+ double y_max = (double) X.amax(0)[1] + 0.5;
+
+ var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30));
+ var s = tf.Session();
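+ // Stack the flattened grid coordinates into samples and classify them all in a single session run.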
+ var samples = np.vstack(xx.ravel(), yy.ravel());
+ var Z = s.run(predict(samples));
+
}
public void fit(NDArray X, NDArray y)
{
NDArray unique_y = y.unique();
- Dictionary<long, List<List<float>>> dic = new Dictionary<long, List<List<float>>>();
+ Dictionary<long, List<List<double>>> dic = new Dictionary<long, List<List<double>>>();
// Init uy in dic
foreach (int uy in unique_y.Data())
{
- dic.Add(uy, new List<List<float>>());
+ dic.Add(uy, new List<List<double>>());
}
// Separate training points by class
// Shape : nb_classes * nb_samples * nb_features
@@ -38,10 +103,10 @@ namespace TensorFlowNET.Examples
for (int i = 0; i < y.size; i++)
{
long curClass = (long)y[i];
- List<List<float>> l = dic[curClass];
- List<float> pair = new List<float>();
- pair.Add((float)X[i,0]);
- pair.Add((float)X[i, 1]);
+ List<List<double>> l = dic[curClass];
+ List<double> pair = new List<double>();
+ pair.Add((double)X[i,0]);
+ pair.Add((double)X[i, 1]);
l.Add(pair);
if (l.Count > maxCount)
{
@@ -49,8 +114,8 @@ namespace TensorFlowNET.Examples
}
dic[curClass] = l;
}
- float[,,] points = new float[dic.Count, maxCount, X.shape[1]];
- foreach (KeyValuePair<long, List<List<float>>> kv in dic)
+ double[,,] points = new double[dic.Count, maxCount, X.shape[1]];
+ foreach (KeyValuePair<long, List<List<double>>> kv in dic)
{
int j = (int) kv.Key;
for (int i = 0; i < maxCount; i++)
@@ -62,7 +127,7 @@ namespace TensorFlowNET.Examples
}
}
- NDArray points_by_class = np.array<float>(points);
+ NDArray points_by_class = np.array<double>(points);
// estimate mean and variance for each class / feature
// shape : nb_classes * nb_features
var cons = tf.constant(points_by_class);
@@ -87,7 +152,10 @@ namespace TensorFlowNET.Examples
// Conditional probabilities log P(x|c) with shape
// (nb_samples, nb_classes)
- Tensor tile = tf.tile(new Tensor(X), new Tensor(new int[] { -1, nb_classes, nb_features }));
+ var t1 = ops.convert_to_tensor(X, TF_DataType.TF_DOUBLE);
+ //var t2 = ops.convert_to_tensor(new int[] { 1, nb_classes });
+ //Tensor tile = tf.tile(t1, t2);
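+ // Repeat each sample once per class so the reshape below yields (n_samples, nb_classes, nb_features) for scoring against every class distribution.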
+ Tensor tile = tf.tile(X, new int[] { 1, nb_classes });
Tensor r = tf.reshape(tile, new Tensor(new int[] { -1, nb_classes, nb_features }));
var cond_probs = tf.reduce_sum(dist.log_prob(r));
// uniform priors