You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

LayersApi.cs 20 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494
  1. using NumSharp;
  2. using System.Collections.Generic;
  3. using Tensorflow.Keras.ArgsDefinition;
  4. using Tensorflow.Keras.Engine;
  5. using static Tensorflow.Binding;
  6. using static Tensorflow.KerasApi;
  7. namespace Tensorflow.Keras.Layers
  8. {
  9. public partial class LayersApi
  10. {
  11. public Preprocessing preprocessing { get; } = new Preprocessing();
  12. /// <summary>
  13. /// Functional interface for the batch normalization layer.
  14. /// http://arxiv.org/abs/1502.03167
  15. /// </summary>
  16. /// <param name="inputs"></param>
  17. /// <param name="axis"></param>
  18. /// <param name="momentum"></param>
  19. /// <param name="epsilon"></param>
  20. /// <param name="center"></param>
  21. /// <param name="scale"></param>
  22. /// <param name="beta_initializer"></param>
  23. /// <param name="gamma_initializer"></param>
  24. /// <param name="moving_mean_initializer"></param>
  25. /// <param name="moving_variance_initializer"></param>
  26. /// <param name="training"></param>
  27. /// <param name="trainable"></param>
  28. /// <param name="name"></param>
  29. /// <param name="renorm"></param>
  30. /// <param name="renorm_momentum"></param>
  31. /// <returns></returns>
  32. public BatchNormalization BatchNormalization(int axis = -1,
  33. float momentum = 0.99f,
  34. float epsilon = 0.001f,
  35. bool center = true,
  36. bool scale = true,
  37. IInitializer beta_initializer = null,
  38. IInitializer gamma_initializer = null,
  39. IInitializer moving_mean_initializer = null,
  40. IInitializer moving_variance_initializer = null,
  41. bool trainable = true,
  42. string name = null,
  43. bool renorm = false,
  44. float renorm_momentum = 0.99f)
  45. => new BatchNormalization(new BatchNormalizationArgs
  46. {
  47. Axis = axis,
  48. Momentum = momentum,
  49. Epsilon = epsilon,
  50. Center = center,
  51. Scale = scale,
  52. BetaInitializer = beta_initializer ?? tf.zeros_initializer,
  53. GammaInitializer = gamma_initializer ?? tf.ones_initializer,
  54. MovingMeanInitializer = moving_mean_initializer ?? tf.zeros_initializer,
  55. MovingVarianceInitializer = moving_variance_initializer ?? tf.ones_initializer,
  56. Renorm = renorm,
  57. RenormMomentum = renorm_momentum,
  58. Trainable = trainable,
  59. Name = name
  60. });
  61. /// <summary>
  62. ///
  63. /// </summary>
  64. /// <param name="filters"></param>
  65. /// <param name="kernel_size"></param>
  66. /// <param name="strides"></param>
  67. /// <param name="padding"></param>
  68. /// <param name="data_format"></param>
  69. /// <param name="dilation_rate"></param>
  70. /// <param name="groups"></param>
  71. /// <param name="activation">tf.keras.activations</param>
  72. /// <param name="use_bias"></param>
  73. /// <param name="kernel_initializer"></param>
  74. /// <param name="bias_initializer"></param>
  75. /// <param name="kernel_regularizer"></param>
  76. /// <param name="bias_regularizer"></param>
  77. /// <param name="activity_regularizer"></param>
  78. /// <returns></returns>
  79. public Conv2D Conv2D(int filters,
  80. TensorShape kernel_size = null,
  81. TensorShape strides = null,
  82. string padding = "valid",
  83. string data_format = null,
  84. TensorShape dilation_rate = null,
  85. int groups = 1,
  86. Activation activation = null,
  87. bool use_bias = true,
  88. IInitializer kernel_initializer = null,
  89. IInitializer bias_initializer = null,
  90. IRegularizer kernel_regularizer = null,
  91. IRegularizer bias_regularizer = null,
  92. IRegularizer activity_regularizer = null)
  93. => new Conv2D(new Conv2DArgs
  94. {
  95. Rank = 2,
  96. Filters = filters,
  97. KernelSize = kernel_size,
  98. Strides = strides == null ? (1, 1) : strides,
  99. Padding = padding,
  100. DataFormat = data_format,
  101. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  102. Groups = groups,
  103. UseBias = use_bias,
  104. KernelRegularizer = kernel_regularizer,
  105. KernelInitializer = kernel_initializer == null ? tf.glorot_uniform_initializer : kernel_initializer,
  106. BiasInitializer = bias_initializer == null ? tf.zeros_initializer : bias_initializer,
  107. BiasRegularizer = bias_regularizer,
  108. ActivityRegularizer = activity_regularizer,
  109. Activation = activation ?? keras.activations.Linear
  110. });
  111. public Conv2D Conv2D(int filters,
  112. TensorShape kernel_size = null,
  113. TensorShape strides = null,
  114. string padding = "valid",
  115. string data_format = null,
  116. TensorShape dilation_rate = null,
  117. int groups = 1,
  118. string activation = null,
  119. bool use_bias = true,
  120. string kernel_initializer = "glorot_uniform",
  121. string bias_initializer = "zeros",
  122. string kernel_regularizer = null,
  123. string bias_regularizer = null,
  124. string activity_regularizer = null)
  125. => new Conv2D(new Conv2DArgs
  126. {
  127. Rank = 2,
  128. Filters = filters,
  129. KernelSize = kernel_size,
  130. Strides = strides == null ? (1, 1) : strides,
  131. Padding = padding,
  132. DataFormat = data_format,
  133. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  134. Groups = groups,
  135. UseBias = use_bias,
  136. KernelInitializer = GetInitializerByName(kernel_initializer),
  137. BiasInitializer = GetInitializerByName(bias_initializer),
  138. Activation = GetActivationByName(activation)
  139. });
  140. /// <summary>
  141. /// Transposed convolution layer (sometimes called Deconvolution).
  142. /// </summary>
  143. /// <param name="filters"></param>
  144. /// <param name="kernel_size"></param>
  145. /// <param name="strides"></param>
  146. /// <param name="padding"></param>
  147. /// <param name="data_format"></param>
  148. /// <param name="dilation_rate"></param>
  149. /// <param name="activation"></param>
  150. /// <param name="use_bias"></param>
  151. /// <param name="kernel_initializer"></param>
  152. /// <param name="bias_initializer"></param>
  153. /// <param name="kernel_regularizer"></param>
  154. /// <param name="bias_regularizer"></param>
  155. /// <param name="activity_regularizer"></param>
  156. /// <returns></returns>
  157. public Conv2DTranspose Conv2DTranspose(int filters,
  158. TensorShape kernel_size = null,
  159. TensorShape strides = null,
  160. string padding = "valid",
  161. string data_format = null,
  162. TensorShape dilation_rate = null,
  163. string activation = null,
  164. bool use_bias = true,
  165. string kernel_initializer = null,
  166. string bias_initializer = null,
  167. string kernel_regularizer = null,
  168. string bias_regularizer = null,
  169. string activity_regularizer = null)
  170. => new Conv2DTranspose(new Conv2DArgs
  171. {
  172. Rank = 2,
  173. Filters = filters,
  174. KernelSize = kernel_size,
  175. Strides = strides == null ? (1, 1) : strides,
  176. Padding = padding,
  177. DataFormat = data_format,
  178. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  179. UseBias = use_bias,
  180. KernelInitializer = GetInitializerByName(kernel_initializer),
  181. BiasInitializer = GetInitializerByName(bias_initializer),
  182. Activation = GetActivationByName(activation)
  183. });
  184. public Dense Dense(int units,
  185. Activation activation = null,
  186. IInitializer kernel_initializer = null,
  187. bool use_bias = true,
  188. IInitializer bias_initializer = null,
  189. TensorShape input_shape = null)
  190. => new Dense(new DenseArgs
  191. {
  192. Units = units,
  193. Activation = activation ?? keras.activations.Linear,
  194. KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
  195. BiasInitializer = bias_initializer ?? (use_bias ? tf.zeros_initializer : null),
  196. InputShape = input_shape
  197. });
  198. public Dense Dense(int units)
  199. => new Dense(new DenseArgs
  200. {
  201. Units = units,
  202. Activation = GetActivationByName("linear")
  203. });
  204. public Dense Dense(int units,
  205. string activation = null,
  206. TensorShape input_shape = null)
  207. => new Dense(new DenseArgs
  208. {
  209. Units = units,
  210. Activation = GetActivationByName(activation),
  211. InputShape = input_shape
  212. });
  213. /// <summary>
  214. /// Densely-connected layer class. aka fully-connected<br></br>
  215. /// `outputs = activation(inputs * kernel + bias)`
  216. /// </summary>
  217. /// <param name="inputs"></param>
  218. /// <param name="units">Python integer, dimensionality of the output space.</param>
  219. /// <param name="activation"></param>
  220. /// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
  221. /// <param name="kernel_initializer"></param>
  222. /// <param name="bias_initializer"></param>
  223. /// <param name="trainable"></param>
  224. /// <param name="name"></param>
  225. /// <param name="reuse"></param>
  226. /// <returns></returns>
  227. public Tensor dense(Tensor inputs,
  228. int units,
  229. Activation activation = null,
  230. bool use_bias = true,
  231. IInitializer kernel_initializer = null,
  232. IInitializer bias_initializer = null,
  233. bool trainable = true,
  234. string name = null,
  235. bool? reuse = null)
  236. {
  237. if (bias_initializer == null)
  238. bias_initializer = tf.zeros_initializer;
  239. var layer = new Dense(new DenseArgs
  240. {
  241. Units = units,
  242. Activation = activation,
  243. UseBias = use_bias,
  244. BiasInitializer = bias_initializer,
  245. KernelInitializer = kernel_initializer,
  246. Trainable = trainable,
  247. Name = name
  248. });
  249. return layer.Apply(inputs);
  250. }
  251. public Dropout Dropout(float rate, TensorShape noise_shape = null, int? seed = null)
  252. => new Dropout(new DropoutArgs
  253. {
  254. Rate = rate,
  255. NoiseShape = noise_shape,
  256. Seed = seed
  257. });
  258. /// <summary>
  259. /// Turns positive integers (indexes) into dense vectors of fixed size.
  260. /// This layer can only be used as the first layer in a model.
  261. /// e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
  262. /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
  263. /// </summary>
  264. /// <param name="input_dim">Size of the vocabulary, i.e. maximum integer index + 1.</param>
  265. /// <param name="output_dim">Dimension of the dense embedding.</param>
  266. /// <param name="embeddings_initializer">Initializer for the embeddings matrix (see keras.initializers).</param>
  267. /// <param name="mask_zero"></param>
  268. /// <returns></returns>
  269. public Embedding Embedding(int input_dim,
  270. int output_dim,
  271. IInitializer embeddings_initializer = null,
  272. bool mask_zero = false,
  273. TensorShape input_shape = null,
  274. int input_length = -1)
  275. => new Embedding(new EmbeddingArgs
  276. {
  277. InputDim = input_dim,
  278. OutputDim = output_dim,
  279. MaskZero = mask_zero,
  280. InputShape = input_shape ?? input_length,
  281. InputLength = input_length,
  282. EmbeddingsInitializer = embeddings_initializer
  283. });
  284. public Flatten Flatten(string data_format = null)
  285. => new Flatten(new FlattenArgs
  286. {
  287. DataFormat = data_format
  288. });
  289. /// <summary>
  290. /// `Input()` is used to instantiate a Keras tensor.
  291. /// </summary>
  292. /// <param name="shape">A shape tuple not including the batch size.</param>
  293. /// <param name="name"></param>
  294. /// <param name="sparse"></param>
  295. /// <param name="ragged"></param>
  296. /// <returns></returns>
  297. public Tensors Input(TensorShape shape,
  298. string name = null,
  299. bool sparse = false,
  300. bool ragged = false)
  301. {
  302. var input_layer = new InputLayer(new InputLayerArgs
  303. {
  304. InputShape = shape,
  305. Name = name,
  306. Sparse = sparse,
  307. Ragged = ragged
  308. });
  309. return input_layer.InboundNodes[0].Outputs;
  310. }
  311. public MaxPooling1D MaxPooling1D(int? pool_size = null,
  312. int? strides = null,
  313. string padding = "valid")
  314. => new MaxPooling1D(new Pooling1DArgs
  315. {
  316. PoolSize = pool_size ?? 2,
  317. Strides = strides ?? (pool_size ?? 2),
  318. Padding = padding
  319. });
  320. public MaxPooling2D MaxPooling2D(TensorShape pool_size = null,
  321. TensorShape strides = null,
  322. string padding = "valid")
  323. => new MaxPooling2D(new MaxPooling2DArgs
  324. {
  325. PoolSize = pool_size ?? (2, 2),
  326. Strides = strides,
  327. Padding = padding
  328. });
  329. /// <summary>
  330. /// Max pooling layer for 2D inputs (e.g. images).
  331. /// </summary>
  332. /// <param name="inputs">The tensor over which to pool. Must have rank 4.</param>
  333. /// <param name="pool_size"></param>
  334. /// <param name="strides"></param>
  335. /// <param name="padding"></param>
  336. /// <param name="data_format"></param>
  337. /// <param name="name"></param>
  338. /// <returns></returns>
  339. public Tensor max_pooling2d(Tensor inputs,
  340. int[] pool_size,
  341. int[] strides,
  342. string padding = "valid",
  343. string data_format = "channels_last",
  344. string name = null)
  345. {
  346. var layer = new MaxPooling2D(new MaxPooling2DArgs
  347. {
  348. PoolSize = pool_size,
  349. Strides = strides,
  350. Padding = padding,
  351. DataFormat = data_format,
  352. Name = name
  353. });
  354. return layer.Apply(inputs);
  355. }
  356. /// <summary>
  357. /// Leaky version of a Rectified Linear Unit.
  358. /// </summary>
  359. /// <param name="alpha">Negative slope coefficient.</param>
  360. /// <returns></returns>
  361. public Layer LeakyReLU(float alpha = 0.3f)
  362. => new LeakyReLu(new LeakyReLuArgs
  363. {
  364. Alpha = alpha
  365. });
  366. public Layer SimpleRNN(int units) => SimpleRNN(units, "tanh");
  367. public Layer SimpleRNN(int units,
  368. Activation activation = null)
  369. => new SimpleRNN(new SimpleRNNArgs
  370. {
  371. Units = units,
  372. Activation = activation
  373. });
  374. public Layer SimpleRNN(int units,
  375. string activation = "tanh")
  376. => new SimpleRNN(new SimpleRNNArgs
  377. {
  378. Units = units,
  379. Activation = GetActivationByName(activation)
  380. });
  381. public Layer LSTM(int units,
  382. Activation activation = null,
  383. Activation recurrent_activation = null,
  384. bool use_bias = true,
  385. IInitializer kernel_initializer = null,
  386. IInitializer recurrent_initializer = null,
  387. IInitializer bias_initializer = null,
  388. bool unit_forget_bias = true,
  389. float dropout = 0f,
  390. float recurrent_dropout = 0f,
  391. int implementation = 2,
  392. bool return_sequences = false,
  393. bool return_state = false,
  394. bool go_backwards = false,
  395. bool stateful = false,
  396. bool time_major = false,
  397. bool unroll = false)
  398. => new LSTM(new LSTMArgs
  399. {
  400. Units = units,
  401. Activation = activation ?? keras.activations.Tanh,
  402. RecurrentActivation = recurrent_activation ?? keras.activations.Sigmoid,
  403. KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
  404. RecurrentInitializer = recurrent_initializer ?? tf.orthogonal_initializer,
  405. BiasInitializer = bias_initializer ?? tf.zeros_initializer,
  406. Dropout = dropout,
  407. RecurrentDropout = recurrent_dropout,
  408. Implementation = implementation,
  409. ReturnSequences = return_sequences,
  410. ReturnState = return_state,
  411. GoBackwards = go_backwards,
  412. Stateful = stateful,
  413. TimeMajor = time_major,
  414. Unroll = unroll
  415. });
  416. public Rescaling Rescaling(float scale,
  417. float offset = 0,
  418. TensorShape input_shape = null)
  419. => new Rescaling(new RescalingArgs
  420. {
  421. Scale = scale,
  422. Offset = offset,
  423. InputShape = input_shape
  424. });
  425. public Add Add()
  426. => new Add(new MergeArgs { });
  427. public Subtract Subtract()
  428. => new Subtract(new MergeArgs { });
  429. public GlobalAveragePooling2D GlobalAveragePooling2D()
  430. => new GlobalAveragePooling2D(new Pooling2DArgs { });
  431. public GlobalAveragePooling1D GlobalAveragePooling1D(string data_format = "channels_last")
  432. => new GlobalAveragePooling1D(new Pooling1DArgs { DataFormat = data_format });
  433. public GlobalAveragePooling2D GlobalAveragePooling2D(string data_format = "channels_last")
  434. => new GlobalAveragePooling2D(new Pooling2DArgs { DataFormat = data_format });
  435. public GlobalMaxPooling1D GlobalMaxPooling1D(string data_format = "channels_last")
  436. => new GlobalMaxPooling1D(new Pooling1DArgs { DataFormat = data_format });
  437. public GlobalMaxPooling2D GlobalMaxPooling2D(string data_format = "channels_last")
  438. => new GlobalMaxPooling2D(new Pooling2DArgs { DataFormat = data_format });
  439. Activation GetActivationByName(string name)
  440. => name switch
  441. {
  442. "linear" => keras.activations.Linear,
  443. "relu" => keras.activations.Relu,
  444. "sigmoid" => keras.activations.Sigmoid,
  445. "tanh" => keras.activations.Tanh,
  446. _ => keras.activations.Linear
  447. };
  448. IInitializer GetInitializerByName(string name)
  449. => name switch
  450. {
  451. "glorot_uniform" => tf.glorot_uniform_initializer,
  452. "zeros" => tf.zeros_initializer,
  453. "ones" => tf.ones_initializer,
  454. _ => tf.glorot_uniform_initializer
  455. };
  456. }
  457. }