
RetrainImageClassifier.cs

using NumSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Python;

namespace TensorFlowNET.Examples.ImageProcess
{
    /// <summary>
    /// In this tutorial, we reuse the feature extraction capabilities of a powerful image classifier trained
    /// on ImageNet and simply train a new classification layer on top. Transfer learning is a technique that
    /// shortcuts much of the cost of training a model from scratch by taking a piece of a model that has already
    /// been trained on a related task and reusing it in a new model.
    ///
    /// https://www.tensorflow.org/hub/tutorials/image_retraining
    /// </summary>
    public class RetrainImageClassifier : IExample
    {
        public int Priority => 16;
        public bool Enabled { get; set; } = false;
        public bool ImportGraph { get; set; } = true;
        public string Name => "Retrain Image Classifier";

        const string data_dir = "retrain_images";
        string summaries_dir = Path.Join(data_dir, "retrain_logs");
        string image_dir = Path.Join(data_dir, "flower_photos");
        string bottleneck_dir = Path.Join(data_dir, "bottleneck");
        string tfhub_module = "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3";
        float testing_percentage = 0.1f;
        float validation_percentage = 0.1f;
        Tensor resized_image_tensor;
        Dictionary<string, Dictionary<string, string[]>> image_lists;

        public bool Run()
        {
            PrepareData();

            var graph = tf.Graph().as_default();
            tf.train.import_meta_graph("graph/InceptionV3.meta");
            Tensor bottleneck_tensor = graph.OperationByName("module_apply_default/hub_output/feature_vector/SpatialSqueeze");
            Tensor resized_image_tensor = graph.OperationByName("Placeholder");
            Tensor final_tensor = graph.OperationByName("final_result");
            Tensor ground_truth_input = graph.OperationByName("input/GroundTruthInput");

            var sw = new Stopwatch();

            with(tf.Session(graph), sess =>
            {
                // Initialize all weights: for the module to their pretrained values,
                // and for the newly added retraining layer to random initial values.
                var init = tf.global_variables_initializer();
                sess.run(init);

                var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding();

                // We'll make sure we've calculated the 'bottleneck' image summaries and
                // cached them on disk.
                cache_bottlenecks(sess, image_lists, image_dir,
                        bottleneck_dir, jpeg_data_tensor,
                        decoded_image_tensor, resized_image_tensor,
                        bottleneck_tensor, tfhub_module);

                // Create the operations we need to evaluate the accuracy of our new layer.
                var (evaluation_step, _) = add_evaluation_step(final_tensor, ground_truth_input);

                // Merge all the summaries and write them out to the summaries_dir
                var merged = tf.summary.merge_all();
            });

            return false;
        }
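
        // Usage sketch (hypothetical, not part of the original example). IExample
        // implementations in this repository are normally discovered and driven by the
        // example runner, but the class can also be exercised directly like this:
        public static bool RunStandalone()
        {
            var example = new RetrainImageClassifier { Enabled = true };
            return example.Run();
        }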
        /// <summary>
        /// Inserts the operations we need to evaluate the accuracy of our results.
        /// </summary>
        /// <param name="result_tensor">The new final node that produces results.</param>
        /// <param name="ground_truth_tensor">The node we feed ground truth data into.</param>
        /// <returns>Tuple of (evaluation step, prediction).</returns>
        private (Tensor, Tensor) add_evaluation_step(Tensor result_tensor, Tensor ground_truth_tensor)
        {
            Tensor evaluation_step = null, correct_prediction = null, prediction = null;

            with(tf.name_scope("accuracy"), scope =>
            {
                with(tf.name_scope("correct_prediction"), delegate
                {
                    prediction = tf.argmax(result_tensor, 1);
                    correct_prediction = tf.equal(prediction, ground_truth_tensor);
                });

                with(tf.name_scope("accuracy"), delegate
                {
                    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
                });
            });

            tf.summary.scalar("accuracy", evaluation_step);
            return (evaluation_step, prediction);
        }
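
        // Illustrative sketch (not used by the example): the accuracy graph above is
        // equivalent to this eager computation on plain arrays, where predictions[i]
        // is the argmax of the logits for sample i.
        private static float accuracy_sketch(long[] predictions, long[] ground_truth)
        {
            int correct = 0;
            for (int i = 0; i < predictions.Length; i++)
                if (predictions[i] == ground_truth[i])
                    correct++;
            return predictions.Length == 0 ? 0f : (float)correct / predictions.Length;
        }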
        /// <summary>
        /// Ensures all the training, testing, and validation bottlenecks are cached.
        /// </summary>
        /// <param name="sess"></param>
        /// <param name="image_lists"></param>
        /// <param name="image_dir"></param>
        /// <param name="bottleneck_dir"></param>
        /// <param name="jpeg_data_tensor"></param>
        /// <param name="decoded_image_tensor"></param>
        /// <param name="resized_input_tensor"></param>
        /// <param name="bottleneck_tensor"></param>
        /// <param name="module_name"></param>
        private void cache_bottlenecks(Session sess, Dictionary<string, Dictionary<string, string[]>> image_lists,
            string image_dir, string bottleneck_dir, Tensor jpeg_data_tensor, Tensor decoded_image_tensor,
            Tensor resized_input_tensor, Tensor bottleneck_tensor, string module_name)
        {
            int how_many_bottlenecks = 0;
            foreach (var (label_name, label_lists) in image_lists)
            {
                foreach (var category in new string[] { "training", "testing", "validation" })
                {
                    var category_list = label_lists[category];
                    foreach (var (index, unused_base_name) in enumerate(category_list))
                    {
                        get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category,
                            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
                            resized_input_tensor, bottleneck_tensor, module_name);
                        how_many_bottlenecks++;
                        if (how_many_bottlenecks % 100 == 0)
                            print($"{how_many_bottlenecks} bottleneck files created.");
                    }
                }
            }
        }
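
        // Illustrative sketch (hypothetical helper, not used above): the total number of
        // bottleneck files cache_bottlenecks will create or reuse is one per image per
        // split, which for the flowers data set is a few thousand text files.
        private int count_expected_bottlenecks_sketch()
        {
            return image_lists.Values
                .Sum(label_lists => new[] { "training", "testing", "validation" }
                    .Sum(category => label_lists[category].Length));
        }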
        private float[] get_or_create_bottleneck(Session sess, Dictionary<string, Dictionary<string, string[]>> image_lists,
            string label_name, int index, string image_dir, string category, string bottleneck_dir,
            Tensor jpeg_data_tensor, Tensor decoded_image_tensor, Tensor resized_input_tensor,
            Tensor bottleneck_tensor, string module_name)
        {
            var label_lists = image_lists[label_name];
            var sub_dir_path = Path.Join(bottleneck_dir, label_name);
            Directory.CreateDirectory(sub_dir_path);

            string bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                bottleneck_dir, category, module_name);
            if (!File.Exists(bottleneck_path))
                create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                    image_dir, category, sess, jpeg_data_tensor,
                    decoded_image_tensor, resized_input_tensor,
                    bottleneck_tensor);

            var bottleneck_string = File.ReadAllText(bottleneck_path);
            var bottleneck_values = Array.ConvertAll(bottleneck_string.Split(','), x => float.Parse(x));
            return bottleneck_values;
        }

        private void create_bottleneck_file(string bottleneck_path, Dictionary<string, Dictionary<string, string[]>> image_lists,
            string label_name, int index, string image_dir, string category, Session sess,
            Tensor jpeg_data_tensor, Tensor decoded_image_tensor, Tensor resized_input_tensor, Tensor bottleneck_tensor)
        {
            // Create a single bottleneck file.
            print("Creating bottleneck at " + bottleneck_path);
            var image_path = get_image_path(image_lists, label_name, index, image_dir, category);
            if (!File.Exists(image_path))
                print($"File does not exist {image_path}");
            var image_data = File.ReadAllBytes(image_path);
            var bottleneck_values = run_bottleneck_on_image(
                sess, image_data, jpeg_data_tensor, decoded_image_tensor,
                resized_input_tensor, bottleneck_tensor);
            var values = bottleneck_values.Data<float>();
            var bottleneck_string = string.Join(",", values);
            File.WriteAllText(bottleneck_path, bottleneck_string);
        }
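
        // Cache file format, for reference: each bottleneck file holds a single line of
        // comma-separated float values, one per element of the module's feature vector
        // (2,048 values for the inception_v3 feature_vector module used here), e.g.
        // "0.18,0.0,1.3,...". The same format is parsed back in get_or_create_bottleneck.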
        /// <summary>
        /// Runs inference on an image to extract the 'bottleneck' summary layer.
        /// </summary>
        /// <param name="sess">Current active TensorFlow Session.</param>
        /// <param name="image_data">Raw JPEG data for the image.</param>
        /// <param name="image_data_tensor">Input data layer in the graph.</param>
        /// <param name="decoded_image_tensor">Output of initial image resizing and preprocessing.</param>
        /// <param name="resized_input_tensor">The input node of the recognition graph.</param>
        /// <param name="bottleneck_tensor">Layer before the final softmax.</param>
        /// <returns>NDArray of bottleneck values.</returns>
        private NDArray run_bottleneck_on_image(Session sess, byte[] image_data, Tensor image_data_tensor,
            Tensor decoded_image_tensor, Tensor resized_input_tensor, Tensor bottleneck_tensor)
        {
            // First decode the JPEG image, resize it, and rescale the pixel values.
            var resized_input_values = sess.run(decoded_image_tensor, new FeedItem(image_data_tensor, image_data));
            // Then run it through the recognition network.
            var bottleneck_values = sess.run(bottleneck_tensor, new FeedItem(resized_input_tensor, resized_input_values));
            bottleneck_values = np.squeeze(bottleneck_values);
            return bottleneck_values;
        }

        private string get_bottleneck_path(Dictionary<string, Dictionary<string, string[]>> image_lists, string label_name, int index,
            string bottleneck_dir, string category, string module_name)
        {
            module_name = (module_name.Replace("://", "~") // URL scheme.
                .Replace('/', '~') // URL and Unix paths.
                .Replace(':', '~').Replace('\\', '~')); // Windows paths.

            return get_image_path(image_lists, label_name, index, bottleneck_dir,
                category) + "_" + module_name + ".txt";
        }
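
        // Worked example of the sanitization above, using the tfhub_module constant
        // defined by this class:
        //   "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3"
        //   -> "https~tfhub.dev~google~imagenet~inception_v3~feature_vector~3"
        // so each cached bottleneck ends up at
        //   bottleneck_dir/<label>/<image file name>_<sanitized module>.txt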
        private string get_image_path(Dictionary<string, Dictionary<string, string[]>> image_lists, string label_name,
            int index, string image_dir, string category)
        {
            if (!image_lists.ContainsKey(label_name))
                print($"Label does not exist {label_name}");

            var label_lists = image_lists[label_name];
            if (!label_lists.ContainsKey(category))
                print($"Category does not exist {category}");

            var category_list = label_lists[category];
            if (category_list.Length == 0)
                print($"Label {label_name} has no images in the category {category}.");

            var mod_index = index % len(category_list);
            var base_name = category_list[mod_index].Split(Path.DirectorySeparatorChar).Last();
            var sub_dir = label_name;
            var full_path = Path.Join(image_dir, sub_dir, base_name);
            return full_path;
        }

        public void PrepareData()
        {
            // Get a set of images to teach the network about the new classes.
            string fileName = "flower_photos.tgz";
            string url = $"http://download.tensorflow.org/models/{fileName}";
            Web.Download(url, data_dir, fileName);
            Compress.ExtractTGZ(Path.Join(data_dir, fileName), data_dir);

            // Download the graph meta data.
            url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/InceptionV3.meta";
            Web.Download(url, "graph", "InceptionV3.meta");

            // Prepare necessary directories that can be used during training.
            Directory.CreateDirectory(summaries_dir);
            Directory.CreateDirectory(bottleneck_dir);

            // Look at the folder structure, and create lists of all the images.
            image_lists = create_image_lists();
            var class_count = len(image_lists);
            if (class_count == 0)
                print($"No valid folders of images found at {image_dir}");
            if (class_count == 1)
                print("Only one valid folder of images found at " +
                    image_dir +
                    " - multiple classes are needed for classification.");
        }
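
        // Rough on-disk layout after PrepareData (the five flower class folders come
        // from the extracted flower_photos.tgz archive):
        //   retrain_images/flower_photos/<daisy|dandelion|roses|sunflowers|tulips>/*.jpg
        //   retrain_images/retrain_logs/          summaries_dir, for TensorBoard logs
        //   retrain_images/bottleneck/<label>/    cached feature-vector text files
        //   graph/InceptionV3.meta                downloaded meta graph imported in Run()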
        private (Tensor, Tensor) add_jpeg_decoding()
        {
            // height, width, depth
            var input_dim = (299, 299, 3);
            var jpeg_data = tf.placeholder(tf.chars, name: "DecodeJPGInput");
            var decoded_image = tf.image.decode_jpeg(jpeg_data, channels: input_dim.Item3);
            // Convert from full range of uint8 to range [0,1] of float32.
            var decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32);
            var decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0);
            var resize_shape = tf.stack(new int[] { input_dim.Item1, input_dim.Item2 });
            var resize_shape_as_int = tf.cast(resize_shape, dtype: tf.int32);
            var resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int);
            return (jpeg_data, resized_image);
        }
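
        // Shape walk-through for the decoding pipeline above, for a single JPEG input:
        //   decode_jpeg          -> [height, width, 3]     uint8
        //   convert_image_dtype  -> [height, width, 3]     float32 in [0, 1]
        //   expand_dims          -> [1, height, width, 3]  adds the batch dimension
        //   resize_bilinear      -> [1, 299, 299, 3]       the input size expected by
        //                                                  the inception_v3 module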
        /// <summary>
        /// Builds a list of training images from the file system.
        /// </summary>
        private Dictionary<string, Dictionary<string, string[]>> create_image_lists()
        {
            var sub_dirs = tf.gfile.Walk(image_dir)
                .Select(x => x.Item1)
                .OrderBy(x => x)
                .ToArray();

            var result = new Dictionary<string, Dictionary<string, string[]>>();

            foreach (var sub_dir in sub_dirs)
            {
                // Walk also yields image_dir itself, which is not a label folder; skip it.
                if (sub_dir == image_dir)
                    continue;

                var dir_name = sub_dir.Split(Path.DirectorySeparatorChar).Last();
                print($"Looking for images in '{dir_name}'");
                var file_list = Directory.GetFiles(sub_dir);
                if (len(file_list) < 20)
                    print($"WARNING: Folder '{dir_name}' has less than 20 images, which may cause issues.");

                var label_name = dir_name.ToLower();
                result[label_name] = new Dictionary<string, string[]>();

                int testing_count = (int)Math.Floor(file_list.Length * testing_percentage);
                int validation_count = (int)Math.Floor(file_list.Length * validation_percentage);
                result[label_name]["testing"] = file_list.Take(testing_count).ToArray();
                result[label_name]["validation"] = file_list.Skip(testing_count).Take(validation_count).ToArray();
                result[label_name]["training"] = file_list.Skip(testing_count + validation_count).ToArray();
            }

            return result;
        }
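
        // Resulting structure, sketched for the flowers data set: image_lists maps each
        // label to its three splits, e.g.
        //   { "daisy":   { "training": [...], "testing": [...], "validation": [...] },
        //     "roses":   { ... }, ... }
        // With testing_percentage and validation_percentage both at 0.1, roughly 80% of
        // each folder lands in "training" and 10% in each of the other two splits.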
    }
}