You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

LayersApi.cs 17 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423
  1. using NumSharp;
  2. using System.Collections.Generic;
  3. using Tensorflow.Keras.ArgsDefinition;
  4. using Tensorflow.Keras.Engine;
  5. using static Tensorflow.Binding;
  6. using static Tensorflow.KerasApi;
  7. namespace Tensorflow.Keras.Layers
  8. {
  9. public partial class LayersApi
  10. {
  11. /// <summary>
  12. /// Functional interface for the batch normalization layer.
  13. /// http://arxiv.org/abs/1502.03167
  14. /// </summary>
  15. /// <param name="inputs"></param>
  16. /// <param name="axis"></param>
  17. /// <param name="momentum"></param>
  18. /// <param name="epsilon"></param>
  19. /// <param name="center"></param>
  20. /// <param name="scale"></param>
  21. /// <param name="beta_initializer"></param>
  22. /// <param name="gamma_initializer"></param>
  23. /// <param name="moving_mean_initializer"></param>
  24. /// <param name="moving_variance_initializer"></param>
  25. /// <param name="training"></param>
  26. /// <param name="trainable"></param>
  27. /// <param name="name"></param>
  28. /// <param name="renorm"></param>
  29. /// <param name="renorm_momentum"></param>
  30. /// <returns></returns>
  31. public BatchNormalization BatchNormalization(int axis = -1,
  32. float momentum = 0.99f,
  33. float epsilon = 0.001f,
  34. bool center = true,
  35. bool scale = true,
  36. IInitializer beta_initializer = null,
  37. IInitializer gamma_initializer = null,
  38. IInitializer moving_mean_initializer = null,
  39. IInitializer moving_variance_initializer = null,
  40. bool trainable = true,
  41. string name = null,
  42. bool renorm = false,
  43. float renorm_momentum = 0.99f)
  44. => new BatchNormalization(new BatchNormalizationArgs
  45. {
  46. Axis = axis,
  47. Momentum = momentum,
  48. Epsilon = epsilon,
  49. Center = center,
  50. Scale = scale,
  51. BetaInitializer = beta_initializer ?? tf.zeros_initializer,
  52. GammaInitializer = gamma_initializer ?? tf.ones_initializer,
  53. MovingMeanInitializer = moving_mean_initializer ?? tf.zeros_initializer,
  54. MovingVarianceInitializer = moving_variance_initializer ?? tf.ones_initializer,
  55. Renorm = renorm,
  56. RenormMomentum = renorm_momentum,
  57. Trainable = trainable,
  58. Name = name
  59. });
  60. /// <summary>
  61. ///
  62. /// </summary>
  63. /// <param name="filters"></param>
  64. /// <param name="kernel_size"></param>
  65. /// <param name="strides"></param>
  66. /// <param name="padding"></param>
  67. /// <param name="data_format"></param>
  68. /// <param name="dilation_rate"></param>
  69. /// <param name="groups"></param>
  70. /// <param name="activation">tf.keras.activations</param>
  71. /// <param name="use_bias"></param>
  72. /// <param name="kernel_initializer"></param>
  73. /// <param name="bias_initializer"></param>
  74. /// <param name="kernel_regularizer"></param>
  75. /// <param name="bias_regularizer"></param>
  76. /// <param name="activity_regularizer"></param>
  77. /// <returns></returns>
  78. public Conv2D Conv2D(int filters,
  79. TensorShape kernel_size = null,
  80. TensorShape strides = null,
  81. string padding = "valid",
  82. string data_format = null,
  83. TensorShape dilation_rate = null,
  84. int groups = 1,
  85. Activation activation = null,
  86. bool use_bias = true,
  87. IInitializer kernel_initializer = null,
  88. IInitializer bias_initializer = null,
  89. IRegularizer kernel_regularizer = null,
  90. IRegularizer bias_regularizer = null,
  91. IRegularizer activity_regularizer = null)
  92. => new Conv2D(new Conv2DArgs
  93. {
  94. Rank = 2,
  95. Filters = filters,
  96. KernelSize = kernel_size,
  97. Strides = strides == null ? (1, 1) : strides,
  98. Padding = padding,
  99. DataFormat = data_format,
  100. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  101. Groups = groups,
  102. UseBias = use_bias,
  103. KernelRegularizer = kernel_regularizer,
  104. KernelInitializer = kernel_initializer == null ? tf.glorot_uniform_initializer : kernel_initializer,
  105. BiasInitializer = bias_initializer == null ? tf.zeros_initializer : bias_initializer,
  106. BiasRegularizer = bias_regularizer,
  107. ActivityRegularizer = activity_regularizer,
  108. Activation = activation ?? keras.activations.Linear
  109. });
  110. public Conv2D Conv2D(int filters,
  111. TensorShape kernel_size = null,
  112. TensorShape strides = null,
  113. string padding = "valid",
  114. string data_format = null,
  115. TensorShape dilation_rate = null,
  116. int groups = 1,
  117. string activation = null,
  118. bool use_bias = true,
  119. string kernel_initializer = "glorot_uniform",
  120. string bias_initializer = "zeros",
  121. string kernel_regularizer = null,
  122. string bias_regularizer = null,
  123. string activity_regularizer = null)
  124. => new Conv2D(new Conv2DArgs
  125. {
  126. Rank = 2,
  127. Filters = filters,
  128. KernelSize = kernel_size,
  129. Strides = strides == null ? (1, 1) : strides,
  130. Padding = padding,
  131. DataFormat = data_format,
  132. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  133. Groups = groups,
  134. UseBias = use_bias,
  135. KernelInitializer = GetInitializerByName(kernel_initializer),
  136. BiasInitializer = GetInitializerByName(bias_initializer),
  137. Activation = GetActivationByName(activation)
  138. });
  139. public Dense Dense(int units,
  140. Activation activation = null,
  141. IInitializer kernel_initializer = null,
  142. bool use_bias = true,
  143. IInitializer bias_initializer = null,
  144. TensorShape input_shape = null)
  145. => new Dense(new DenseArgs
  146. {
  147. Units = units,
  148. Activation = activation ?? keras.activations.Linear,
  149. KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
  150. BiasInitializer = bias_initializer ?? (use_bias ? tf.zeros_initializer : null),
  151. InputShape = input_shape
  152. });
  153. public Dense Dense(int units)
  154. => new Dense(new DenseArgs
  155. {
  156. Units = units,
  157. Activation = GetActivationByName("linear")
  158. });
  159. public Dense Dense(int units,
  160. string activation = null,
  161. TensorShape input_shape = null)
  162. => new Dense(new DenseArgs
  163. {
  164. Units = units,
  165. Activation = GetActivationByName(activation),
  166. InputShape = input_shape
  167. });
  168. /// <summary>
  169. /// Densely-connected layer class. aka fully-connected<br></br>
  170. /// `outputs = activation(inputs * kernel + bias)`
  171. /// </summary>
  172. /// <param name="inputs"></param>
  173. /// <param name="units">Python integer, dimensionality of the output space.</param>
  174. /// <param name="activation"></param>
  175. /// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
  176. /// <param name="kernel_initializer"></param>
  177. /// <param name="bias_initializer"></param>
  178. /// <param name="trainable"></param>
  179. /// <param name="name"></param>
  180. /// <param name="reuse"></param>
  181. /// <returns></returns>
  182. public Tensor dense(Tensor inputs,
  183. int units,
  184. Activation activation = null,
  185. bool use_bias = true,
  186. IInitializer kernel_initializer = null,
  187. IInitializer bias_initializer = null,
  188. bool trainable = true,
  189. string name = null,
  190. bool? reuse = null)
  191. {
  192. if (bias_initializer == null)
  193. bias_initializer = tf.zeros_initializer;
  194. var layer = new Dense(new DenseArgs
  195. {
  196. Units = units,
  197. Activation = activation,
  198. UseBias = use_bias,
  199. BiasInitializer = bias_initializer,
  200. KernelInitializer = kernel_initializer,
  201. Trainable = trainable,
  202. Name = name
  203. });
  204. return layer.Apply(inputs);
  205. }
  206. public Dropout Dropout(float rate, TensorShape noise_shape = null, int? seed = null)
  207. => new Dropout(new DropoutArgs
  208. {
  209. Rate = rate,
  210. NoiseShape = noise_shape,
  211. Seed = seed
  212. });
  213. /// <summary>
  214. /// Turns positive integers (indexes) into dense vectors of fixed size.
  215. /// This layer can only be used as the first layer in a model.
  216. /// e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
  217. /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
  218. /// </summary>
  219. /// <param name="input_dim">Size of the vocabulary, i.e. maximum integer index + 1.</param>
  220. /// <param name="output_dim">Dimension of the dense embedding.</param>
  221. /// <param name="embeddings_initializer">Initializer for the embeddings matrix (see keras.initializers).</param>
  222. /// <param name="mask_zero"></param>
  223. /// <returns></returns>
  224. public Embedding Embedding(int input_dim,
  225. int output_dim,
  226. IInitializer embeddings_initializer = null,
  227. bool mask_zero = false,
  228. TensorShape input_shape = null,
  229. int input_length = -1)
  230. => new Embedding(new EmbeddingArgs
  231. {
  232. InputDim = input_dim,
  233. OutputDim = output_dim,
  234. MaskZero = mask_zero,
  235. InputShape = input_shape ?? input_length,
  236. InputLength = input_length,
  237. EmbeddingsInitializer = embeddings_initializer
  238. });
  239. public Flatten Flatten(string data_format = null)
  240. => new Flatten(new FlattenArgs
  241. {
  242. DataFormat = data_format
  243. });
  244. /// <summary>
  245. /// `Input()` is used to instantiate a Keras tensor.
  246. /// </summary>
  247. /// <param name="shape">A shape tuple not including the batch size.</param>
  248. /// <param name="name"></param>
  249. /// <param name="sparse"></param>
  250. /// <param name="ragged"></param>
  251. /// <returns></returns>
  252. public Tensors Input(TensorShape shape,
  253. string name = null,
  254. bool sparse = false,
  255. bool ragged = false)
  256. {
  257. var input_layer = new InputLayer(new InputLayerArgs
  258. {
  259. InputShape = shape,
  260. Name = name,
  261. Sparse = sparse,
  262. Ragged = ragged
  263. });
  264. return input_layer.InboundNodes[0].Outputs;
  265. }
  266. public MaxPooling2D MaxPooling2D(TensorShape pool_size = null,
  267. TensorShape strides = null,
  268. string padding = "valid")
  269. => new MaxPooling2D(new MaxPooling2DArgs
  270. {
  271. PoolSize = pool_size ?? (2, 2),
  272. Strides = strides,
  273. Padding = padding
  274. });
  275. /// <summary>
  276. /// Max pooling layer for 2D inputs (e.g. images).
  277. /// </summary>
  278. /// <param name="inputs">The tensor over which to pool. Must have rank 4.</param>
  279. /// <param name="pool_size"></param>
  280. /// <param name="strides"></param>
  281. /// <param name="padding"></param>
  282. /// <param name="data_format"></param>
  283. /// <param name="name"></param>
  284. /// <returns></returns>
  285. public Tensor max_pooling2d(Tensor inputs,
  286. int[] pool_size,
  287. int[] strides,
  288. string padding = "valid",
  289. string data_format = "channels_last",
  290. string name = null)
  291. {
  292. var layer = new MaxPooling2D(new MaxPooling2DArgs
  293. {
  294. PoolSize = pool_size,
  295. Strides = strides,
  296. Padding = padding,
  297. DataFormat = data_format,
  298. Name = name
  299. });
  300. return layer.Apply(inputs);
  301. }
  302. /// <summary>
  303. /// Leaky version of a Rectified Linear Unit.
  304. /// </summary>
  305. /// <param name="alpha">Negative slope coefficient.</param>
  306. /// <returns></returns>
  307. public Layer LeakyReLU(float alpha = 0.3f)
  308. => new LeakyReLu(new LeakyReLuArgs
  309. {
  310. Alpha = alpha
  311. });
  312. public Layer SimpleRNN(int units) => SimpleRNN(units, "tanh");
  313. public Layer SimpleRNN(int units,
  314. Activation activation = null)
  315. => new SimpleRNN(new SimpleRNNArgs
  316. {
  317. Units = units,
  318. Activation = activation
  319. });
  320. public Layer SimpleRNN(int units,
  321. string activation = "tanh")
  322. => new SimpleRNN(new SimpleRNNArgs
  323. {
  324. Units = units,
  325. Activation = GetActivationByName(activation)
  326. });
  327. public Layer LSTM(int units,
  328. Activation activation = null,
  329. Activation recurrent_activation = null,
  330. bool use_bias = true,
  331. IInitializer kernel_initializer = null,
  332. IInitializer recurrent_initializer = null,
  333. IInitializer bias_initializer = null,
  334. bool unit_forget_bias = true,
  335. float dropout = 0f,
  336. float recurrent_dropout = 0f,
  337. int implementation = 2,
  338. bool return_sequences = false,
  339. bool return_state = false,
  340. bool go_backwards = false,
  341. bool stateful = false,
  342. bool time_major = false,
  343. bool unroll = false)
  344. => new LSTM(new LSTMArgs
  345. {
  346. Units = units,
  347. Activation = activation ?? keras.activations.Tanh,
  348. RecurrentActivation = recurrent_activation ?? keras.activations.Sigmoid,
  349. KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
  350. RecurrentInitializer = recurrent_initializer ?? tf.orthogonal_initializer,
  351. BiasInitializer = bias_initializer ?? tf.zeros_initializer,
  352. Dropout = dropout,
  353. RecurrentDropout = recurrent_dropout,
  354. Implementation = implementation,
  355. ReturnSequences = return_sequences,
  356. ReturnState = return_state,
  357. GoBackwards = go_backwards,
  358. Stateful = stateful,
  359. TimeMajor = time_major,
  360. Unroll = unroll
  361. });
  362. public Rescaling Rescaling(float scale,
  363. float offset = 0,
  364. TensorShape input_shape = null)
  365. => new Rescaling(new RescalingArgs
  366. {
  367. Scale = scale,
  368. Offset = offset,
  369. InputShape = input_shape
  370. });
  371. public Add Add()
  372. => new Add(new MergeArgs { });
  373. public Subtract Subtract()
  374. => new Subtract(new MergeArgs { });
  375. public GlobalAveragePooling2D GlobalAveragePooling2D()
  376. => new GlobalAveragePooling2D(new Pooling2DArgs { });
  377. Activation GetActivationByName(string name)
  378. => name switch
  379. {
  380. "linear" => keras.activations.Linear,
  381. "relu" => keras.activations.Relu,
  382. "sigmoid" => keras.activations.Sigmoid,
  383. "tanh" => keras.activations.Tanh,
  384. _ => keras.activations.Linear
  385. };
  386. IInitializer GetInitializerByName(string name)
  387. => name switch
  388. {
  389. "glorot_uniform" => tf.glorot_uniform_initializer,
  390. "zeros" => tf.zeros_initializer,
  391. "ones" => tf.ones_initializer,
  392. _ => tf.glorot_uniform_initializer
  393. };
  394. }
  395. }