Browse Source

v0.01 added base function and tested

master
Edge 4 years ago
parent
commit
e28cfa1408
100 changed files with 19875 additions and 13 deletions
  1. +11933
    -0
      Edge计算引擎.pdf
  2. BIN
      Edge计算引擎.xmind
  3. +540
    -13
      README.md
  4. +231
    -0
      README_EN.md
  5. +2079
    -0
      a.csv
  6. +16
    -0
      autodiff/dor.h
  7. +26
    -0
      autodiff/graph.h
  8. +14
    -0
      autodiff/mor.h
  9. +72
    -0
      autodiff/node.h
  10. +16
    -0
      autodiff/por.h
  11. +99
    -0
      autodiff/vectmath.h
  12. +34
    -0
      conv_.cpp
  13. +1800
    -0
      data/data.csv
  14. +100
    -0
      data/nerual_data.csv
  15. +100
    -0
      data/new_data2.csv
  16. +107
    -0
      data_struct/data_struct_pro.c
  17. +47
    -0
      data_struct/data_struct_pro.h
  18. +93
    -0
      data_struct_test.cpp
  19. +196
    -0
      file_pro/data_read.h
  20. +87
    -0
      grad_edge/matrix_grad.h
  21. BIN
      image-20200128154352842.png
  22. BIN
      image-20200418210521131.png
  23. +62
    -0
      install_diff/Makefile
  24. BIN
      install_diff/bin/ann
  25. BIN
      install_diff/bin/gradient
  26. BIN
      install_diff/bin/gradient_descent
  27. BIN
      install_diff/bin/simple
  28. BIN
      install_diff/bin/speed
  29. +31
    -0
      install_diff/examples/Makefile
  30. BIN
      install_diff/examples/obj/ann.o
  31. BIN
      install_diff/examples/obj/gradient.o
  32. BIN
      install_diff/examples/obj/gradient_descent.o
  33. BIN
      install_diff/examples/obj/simple.o
  34. BIN
      install_diff/examples/obj/speed.o
  35. +126
    -0
      install_diff/examples/src/ann.cpp
  36. +15
    -0
      install_diff/examples/src/gradient.cpp
  37. +27
    -0
      install_diff/examples/src/gradient_descent.cpp
  38. BIN
      install_diff/examples/src/ma
  39. +13
    -0
      install_diff/examples/src/simple.cpp
  40. +51
    -0
      install_diff/examples/src/speed.cpp
  41. BIN
      install_diff/lib/libautodiff.a
  42. +22
    -0
      install_diff/root/Makefile
  43. +16
    -0
      install_diff/root/include/dor.h
  44. +26
    -0
      install_diff/root/include/graph.h
  45. +14
    -0
      install_diff/root/include/mor.h
  46. +72
    -0
      install_diff/root/include/node.h
  47. +16
    -0
      install_diff/root/include/por.h
  48. +99
    -0
      install_diff/root/include/vectmath.h
  49. BIN
      install_diff/root/obj/graph.o
  50. BIN
      install_diff/root/obj/node.o
  51. +33
    -0
      install_diff/root/src/graph.cpp
  52. +275
    -0
      install_diff/root/src/node.cpp
  53. BIN
      logistic/.logistic_def.h.swp
  54. BIN
      logistic/log
  55. +20
    -0
      logistic/logistic_def.cpp
  56. +35
    -0
      logistic/logistic_def.h
  57. +96
    -0
      main.cpp
  58. BIN
      matrix/.matrix_pro.h.swp
  59. +15
    -0
      matrix/conv_test.cpp
  60. +121
    -0
      matrix/matrix_def.h
  61. +442
    -0
      matrix/matrix_pro.h
  62. +1
    -0
      mytest.csv
  63. +2
    -0
      mytest.txt
  64. +58
    -0
      neral/test.py
  65. +91
    -0
      nerual_test.cpp
  66. BIN
      pics/image-20200418210521131.png
  67. +1
    -0
      picture/00.svg
  68. +1
    -0
      picture/01.svg
  69. +1
    -0
      picture/02.svg
  70. BIN
      picture/WX20191119-105411@2x.png
  71. BIN
      picture/WX20191119-125244@2x.png
  72. BIN
      picture/apply_axis_0.png
  73. BIN
      picture/apply_axis_1.png
  74. BIN
      picture/autograd.jpg
  75. +1
    -0
      picture/cpu.svg
  76. +1
    -0
      picture/jabber.svg
  77. BIN
      picture/logo.png
  78. BIN
      picture/logo2.png
  79. BIN
      picture/nerual_test1.png
  80. BIN
      picture/path.png
  81. +1
    -0
      picture/processwire (1).svg
  82. +1
    -0
      picture/啊.svg
  83. +1
    -0
      picture/彩虹.svg
  84. +1
    -0
      picture/方向.svg
  85. +1
    -0
      picture/星月.svg
  86. +1
    -0
      picture/火箭.svg
  87. +4
    -0
      requitement/.gitignore
  88. +62
    -0
      requitement/Makefile
  89. +31
    -0
      requitement/examples/Makefile
  90. +126
    -0
      requitement/examples/src/ann.cpp
  91. +15
    -0
      requitement/examples/src/gradient.cpp
  92. +27
    -0
      requitement/examples/src/gradient_descent.cpp
  93. +13
    -0
      requitement/examples/src/simple.cpp
  94. +51
    -0
      requitement/examples/src/speed.cpp
  95. +22
    -0
      requitement/root/Makefile
  96. +16
    -0
      requitement/root/include/dor.h
  97. +26
    -0
      requitement/root/include/graph.h
  98. +14
    -0
      requitement/root/include/mor.h
  99. +72
    -0
      requitement/root/include/node.h
  100. +16
    -0
      requitement/root/include/por.h

+ 11933
- 0
Edge计算引擎.pdf
File diff suppressed because it is too large
View File


BIN
Edge计算引擎.xmind View File


+ 540
- 13
README.md View File

@@ -1,21 +1,548 @@
#### 从命令行创建一个新的仓库
new line

```bash
touch README.md
git init
git add README.md
git commit -m "first commit"
git remote add origin https://git.trustie.net/Edge/Edge-Computing-Engine.git
git push -u origin master

<div align=center><img src="./picture/01.svg"/></div>
# Edge-Engine



## Edge : 一个开源的科学计算引擎

[README for English_version](./README_EN.md)

声明:本项目禁止闭源商用,如有需要请和作者取得联系

email: zk@likedge.top

<br>[![GitHub license](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](https://raw.githubusercontent.com/tesseract-ocr/tesseract/master/LICENSE)<br>

------



> 项目开始日期 : 2019/10/01
>
> 目前项目总代码 : 810 行
>
> 测试 : main.cpp | nerual_network.cpp | 新增全连接神经网络架构(新增全连接网络正向传播和反向传播的测试demo)
>
> 测试环境:
>
> MacBook Pro
>
> 编译器环境:
>
> Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/c++/4.2.1
> Apple LLVM version 10.0.1 (clang-1001.0.46.4)
> Target: x86_64-apple-darwin18.7.0
> Thread model: posix

<img src="image-20200418210521131.png" alt="image-20200418210521131" />



------

这是什么?

![path](./picture/path.png)

## 安装编译

```
git clone git@github.com:AllenZYJ/Edge-Computing-Engine.git

cd to install_diff
```

进入install_diff目录:

执行

```
make
make install
```

编译demo入口程序

```shell
➜ edge-computing-engine git:(master) ✗ g++ main.cpp -o ma -lautodiff
```

或者BP测试程序

```shell
➜ edge-computing-engine git:(master) ✗ g++ nerual_network.cpp -o ma
```

运行

```shell
➜ edge-computing-engine git:(master) ✗ ./main
```

最新卷积实现:

```c++
double conv_test(Matrix mid1,int input_dim = 3,int output_channels = 3,int stride = 1,int kernel_size = 2,int mode = 0,int padding = 0)
```



序贯模型api使用方法:

edge_network(int input, int num_neuron)

作为序列模型api

edge_network作为一个类型存在,位于matrix_grad.h中结构体类型的数据

定义了前向传播函数,前向传播无激活版,反向传播,末层反向传播,四大最常用的函数主体.

完整的序列模型:

![image-20200128154352842](./image-20200128154352842.png)

## 新的demo程序实现5层全连接层,可自定义神经元和激活函数,损失函数

全连接层使用方法:

第一层的权重自定义,而后调用forward函数前向传播一层,自动求出激活以后的值,激活函数可自定义.

首先定义一个权重矩阵和偏置矩阵,第一个矩阵的维度大小使用数据列去定义:

```c
Matrix bias1 = CreateRandMat(2,1);
Matrix weight1 = CreateRandMat(2,data.col);
```

之后可以输出第一层前向传播的值,同时可以定义下一层的bias的维度, row使用第一层的权重矩阵的行,第二层的权重矩阵的行使用了第一层的输出的行, 而列自行定义即可, 这一点体现了前向传播算法的维度相容. 也就是:

```c
Matrix output1 = sequaltial.forward(get_T(get_row(data_mine,index)),weight1,bias1);
```

#### 从命令行推送已经创建的仓库
```c
Matrix weight2 = CreateRandMat(output1.row,2);
Matrix bias2 = CreateRandMat(weight2.row,1);
Matrix output2 = sequaltial.forward(output1,weight2,bias2);
```

同时第二层的输出也可以求出来,以此类推 .

最终输出代码见nerual_test.cpp ![nerual_test1](./picture/nerual_test1.png)

```bash
git remote add origin https://git.trustie.net/Edge/Edge-Computing-Engine.git
git push -u origin master
```

代码:

```c
Matrix data_mine = CreateRandMat(2,1);
Matrix label = CreateMatrix(2,1);
Matrix weight1 = CreateRandMat(2,2);
Matrix weight2 = CreateRandMat(2,2);
Matrix weight3 = CreateRandMat(2,2);
Matrix weight4 = CreateRandMat(2,2);
for(int epoch = 0;epoch<20;epoch++)
{
cout_mat(weight1);
edge_network sequaltial(2,2);

Matrix output1 = sequaltial.forward(data_mine,weight1);
Matrix output2 = sequaltial.forward(output1,weight2);
Matrix output3 = sequaltial.forward(output2,weight3);
Matrix output4 = sequaltial.forward(output3,weight4);
Matrix output_end = sequaltial.end_layer_backward(label,output4);
//get the forward
Matrix backward1 = sequaltial.backward(output_end,output3,weight4);
Matrix grad_w1w2 = mul_simple(backward1,data_mine);
Matrix backward2 = sequaltial.backward(backward1,output2,weight3);
Matrix grad_w3w4 = mul_simple(backward2,data_mine);
Matrix backward3 = sequaltial.backward(backward2,output1,weight2);
Matrix grad_w5w6 = mul_simple(backward3,data_mine);
Matrix backward4 = sequaltial.backward(backward3,output4,weight1);
Matrix grad_w7w8 = mul_simple(backward4,data_mine);
weight1 = subtract(weight1,times_mat(0.0001,padding(grad_w1w2,2,2)));
weight2 = subtract(weight2,times_mat(0.0001,padding(grad_w3w4,2,2)));
weight3 = subtract(weight3,times_mat(0.0001,padding(grad_w5w6,2,2)));
weight4 = subtract(weight4,times_mat(0.0001,padding(grad_w7w8,2,2)));
}
```
```shell
---------epoch: 0------------
loss: 4.65667
loss: 3.28273
---------epoch: 1------------
loss: 4.65655
loss: 3.28265
---------epoch: 2------------
loss: 4.65643
loss: 3.28257
---------epoch: 3------------
loss: 4.65631
loss: 3.28249
---------epoch: 4------------
loss: 4.65619
loss: 3.2824
---------epoch: 5------------
loss: 4.65607
loss: 3.28232
---------epoch: 6------------
loss: 4.65596
loss: 3.28224
---------epoch: 7------------
loss: 4.65584
loss: 3.28216
---------epoch: 8------------
loss: 4.65572
loss: 3.28208
---------epoch: 9------------
loss: 4.6556
loss: 3.282
---------epoch: 10------------
loss: 4.65548
loss: 3.28192
---------epoch: 11------------
loss: 4.65536
loss: 3.28184
---------epoch: 12------------
loss: 4.65524
loss: 3.28176
---------epoch: 13------------
loss: 4.65512
loss: 3.28168
---------epoch: 14------------
loss: 4.65501
loss: 3.2816
---------epoch: 15------------
loss: 4.65489
loss: 3.28152
---------epoch: 16------------
loss: 4.65477
loss: 3.28144
---------epoch: 17------------
loss: 4.65465
loss: 3.28136
---------epoch: 18------------
loss: 4.65453
loss: 3.28128
---------epoch: 19------------
loss: 4.65441
loss: 3.2812
```

## Bp反向传播的demo程序基于Pytorch官方代码模拟实现测试

迭代结果 :

W1: 0.6944 1.52368
-1.46644 -0.154097
W2: 1.10079
0.462984
loss: 0.559269

epoch:100 , 可自行测试.

输出最终损失和参数迭代结果.

-----------split-line-----------
2.79955
0.36431
-0.451694
epoch: 100 error: 6.05895
-----------split-line-----------
0.009167(sum of loss)

### 目前实现的程序接口

### API:

- [x] Matrix read_csv(string &file_path)读取格式化文件(csv),返回一个自动计算长度的矩阵.

- [x] 实现格式化文件写入接口.比较pandas.to_csv.

- [x] 矩阵广播机制,实现padding接口

- [x] 全连接层前向传播和反向传播接口,支持自动求导

- [x] 矩阵微分和自动求导接口封装

- [x] int save_txt(Matrix mid1,string path = "./",string delimiter = ",",string header="./") 设计文件流获取文件头部接口 , 写入格式化文件 , 已设计支持矩阵类型数据写入,支持自定义表头,写入文件路径 , 自定义分隔符,默认为" , ".

- [x] Create a matrix : create(row,cols)开辟一个矩阵结构的内存,元素初值为0;

- [x] Change the element for matrix void move_ele(int &ele1, int &ele2),修改某一个位置的元素的值.

- [x] Matrix1+Matrix2 : Matrix add(Matrix mid1,Matrix mid2,int flag=1),矩阵加和操作接口,可选位运算加速.

- [x] Flag is how to compete the ele ,default 1 ,bitwise operation(位运算加速).

- [x] Matrix1-Matrix2 : Matrix subtract(Matrix mid1,Matrix mid2)

- [x] Matrix1*Matrix2 : Matrix mul(Matrix mid1,Matrix mid2)

- [x] Matrix1*n : Matrix times_mat(int times,Matrix mid1)

- [x] Matrix1's Transposition : Matrix get_T(Matrix mid1)矩阵转置

- [x] Mul(matrix1,matrix2)矩阵乘积(完整数学定义).

- [x] double* flatten(Matrix mid1) : Return a flattened array.矩阵展开

- [x] Matrix matrix_rs(Matrix mid1,int rs_row,int rs_col) 矩阵的结构压缩

- [x] double matrix_sum(Matrix mid1)矩阵求和

- [x] double matrix_mean(Matrix mid1)均值

- [x] Matrix appply(Matrix mid1,Matrix mid2,int axis = 0)矩阵拼接

- [x] Matrix iloc(Matrix mid1,int start_x=0,int end_x=0,int start_y=0,int end_y=0)矩阵切片

- [x] Matrix mul_simple(Matrix mid1,Matrix mid2)为了贴合机器学习的需要,实现了矩阵对应元素相乘,请与传统意义的矩阵乘法区分开.

- [x] Relu激活函数矩阵接口

- [x] 均方误差矩阵接口

- [x] 创建随机权重矩阵接口

### 即将着手开发:

- [ ] 卷积神经网络定义(包括但不限于卷积核,池化层定义,自定义损失接口).

- [ ] 随机森林算法封装.

- [ ] 主流网络架构实现.


## 反向传播测试demo:

```c
#include<iostream>
#include<ctime>
#include<string>
#include<time.h>
#include<math.h>
#include<fstream>
#include<stdlib.h>
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
using namespace std;
clock_t start, stop;
double duration;
int main()
{
welcome();
string path = "./data/nerual_data.csv";
Matrix data = read_csv(path);
Matrix bais = CreateMatrix(data.row,1);
Matrix x = iloc(data,0,100,0,2);
Matrix y = iloc(data,0,100,2,3);
int N=100,in_Dim=2,H_num=2,out_Dim=2;
double learning_rate = 0.0001;
Matrix W1 = CreateRandMat(in_Dim,H_num);
Matrix W2 = CreateRandMat(H_num,out_Dim);
cout_mat(W1);
cout_mat(W2);
for(int epoch = 0;epoch<100;epoch++)
{
Matrix x_w1 = mul(x,W1);
Matrix re = mat_relu(x_w1);
Matrix out = mul(re,W2);
Matrix mat_sq = mat_sq_loss(out,y);
Matrix grad_y_pred = times_mat(2.0,subtract(out,y));
Matrix grad_w2 = mul(get_T(re),grad_y_pred);
Matrix grad_h_relu = mul(grad_y_pred,get_T(W2));
Matrix grad_h_relu_copy = mat_relu(grad_h_relu);
Matrix grad_w1 = mul(get_T(x),grad_h_relu_copy);
Matrix dw1 = times_mat(learning_rate,mul(get_T(x),grad_h_relu_copy));
W1 = subtract(W1,dw1);
W2 = subtract(W2,times_mat(learning_rate,grad_w2));
cout<<"W1: ";
cout_mat(W1);
cout<<"W2: ";
cout_mat(W2);
cout<<"loss"<<": ";
cout<<matrix_sum(mat_sq)/100<<endl;
}
}
```



## 演示:矩阵乘法

Matrix **A**:

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 |
| ------- | ------- | ------- | ------- | ------- |
| 72.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 64.0000 | 0.0000 | 0.0000 | 0.0000 |
| 16.0000 | 8.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 56.0000 | 16.0000 | 32.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |

Matrix **B**:

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 | 第6列 |
| ------- | ------- | ------- | ------- | ------ | ------ |
| 72.0000 | 0.0000 | 16.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 64.0000 | 8.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 56.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 16.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 32.0000 | 0.0000 | 0.0000 |

To

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 | 第6列 |
| --------- | --------- | --------- | --------- | ------ | ------ |
| 5184.0000 | 0.0000 | 1152.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 4096.0000 | 512.0000 | 0.0000 | 0.0000 | 0.0000 |
| 1152.0000 | 512.0000 | 320.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 4416.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |

## 演示: 矩阵展开(flatten).

double* flatten(Matrix mid1)

| 1 | 2 | 3 |
| :--: | :--: | :--: |
| 2 | 4 | 6 |
| 7 | 8 | 9 |

​ To

| 1 | 2 | 3 | 2 | 4 | 6 | 7 | 8 | 9 |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | :----------------- |
| | | | | | | | | Like numpy.flatten |

function:

## 演示: 邻接矩阵的参数定义:

​ Matrix appply(Matrix mid1,Matrix mid2,int axis = 0)

> 参数 axis=0 :

| 0 | 7 | 2 |
| ---- | ---- | ---- |
| 0 | 3 | 1 |
| 0 | 0 | 0 |
| 0 | 0 | 11 |
| 0 | 7 | 2 |
| 0 | 3 | 1 |
| 0 | 0 | 0 |
| 0 | 0 | 11 |
------

> axis = 1:

| 0 | 7 | 2 | 0 | 7 | 2 |
| ---- | ---- | ---- | ---- | ---- | ---- |
| 0 | 3 | 1 | 0 | 3 | 1 |
| 0 | 0 | 0 | 0 | 0 | 0 |
| 0 | 0 | 11 | 0 | 0 | 11 |

------

## 更新2019/11/18/00:12

- [x] read_csv 通过文件流读取逗号分隔符文件,返回一个自动计算长度的矩阵.

例如 CSV's head :

| -0.017612 | 14.053064 | 0 |
| --------- | --------- | ---- |
| -1.395634 | 4.662541 | 1 |
| -0.752157 | 6.53862 | 0 |
| -1.322371 | 7.152853 | 0 |
| 0.423363 | 11.054677 | 0 |
| 0.406704 | 7.067335 | 1 |

Get:

![](./picture/WX20191119-105411@2x.png)


## Logistic Regression demo base Edge:


```c
#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
using namespace std;
clock_t start, stop;
double duration;
int main()
{
welcome();
string path = "./new_data2.csv";
Matrix data = read_csv(path);
Matrix bais = CreateMatrix(data.row,1);
data = appply(data,bais,1);
Matrix y = iloc(data,0,0,3,4);
Matrix x_1 = iloc(data,0,0,0,3);
Matrix x_2 = get_T(x_1);
double alpha = 0.002;
int max_epoch = 100;
Matrix weight = CreateMatrix(3,1);
change_va(weight,0,0,1);
change_va(weight,1,0,1);
change_va(weight,2,0,1);
int epoch = 0;
for(epoch = 0;epoch<=max_epoch;epoch++)
{
cout<<"-----------split-line-----------"<<endl;
Matrix temp_mul = mul(x_1,weight);
Matrix h =e_sigmoid(temp_mul);
Matrix error = subtract(y,h);
Matrix temp_update = mul(x_2,error);
Matrix updata = add(weight,times_mat(alpha,temp_update),0);
cout_mat(weight);
cout<<"epoch: "<<epoch<<" error: "<<matrix_sum(error)<<endl;
cout<<"-----------split-line-----------"<<endl;
}
stop = clock();
printf("%f\n", (double)(stop - start) / CLOCKS_PER_SEC);
return 0;
}
```
Something :

> 1. 矩阵元素默认为1
> 2. 使用位运算加速防止填充过大的数值,但是会损失一定精度,慎用.
> 3. 记得delete(matrix)在你使用完一个矩阵计算单元以后.
> 4. api接口更多的接近于pandas和numpy的使用习惯.
> 5. 更多的细节参见目前最新的代码
> 6. 欢迎star和关注.
> 7. autodiff部分感谢国外博主Omar的思路提醒.
>

------



<div align = center><img src = './picture/星月.svg'></div>


个人小站:[极度空间](http://likedge.top/)

作者邮箱:zk@likedge.top | edge@ibooker.org.cn

QQ:2533524298

+ 231
- 0
README_EN.md View File

@@ -0,0 +1,231 @@


<div align=center><img src="./picture/01.svg"/></div>

# Edge-Engine

## Edge : 一个开源的科学计算引擎



[![license](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000) ](https://github.com/AllenZYJ/Edge-Computing-Engine/blob/add-license-1/LICENSE)![](https://img.shields.io/badge/Bulid-Version1.0-green.svg)

> 项目开始日期 : 2019/10/01

> 目前项目总代码 : 709 行
>
> 测试代码 : 810 行
>
> 测试环境:
>
> MacBook Pro
>
> 编译器环境:
>
> Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/c++/4.2.1
> Apple LLVM version 10.0.1 (clang-1001.0.46.4)
> Target: x86_64-apple-darwin18.7.0
> Thread model: posix
>
> 目前实现的:

How to install and run the demo:

`git clone git@github.com:AllenZYJ/Edge-Computing-Engine.git`

`cd to this dir `

`g++ main.cpp -o main`

`./main`

Matrix API:

- [x] Matrix read_csv(string &file_path)
- [x] Create a matrix : create(row,cols)
- [x] Change the element for matrix void move_ele(int &ele1, int &ele2)
- [x] Matrix1+Matrix2 : Matrix add(Matrix mid1,Matrix mid2,int flag=1)
- [x] Flag is how to compete the ele ,default 1 ,bitwise operation(位运算加速).
- [x] Matrix1-Matrix2 : Matrix subtract(Matrix mid1,Matrix mid2)
- [x] Matrix1*Matrix2 : Matrix mul(Matrix mid1,Matrix mid2)
- [x] Matrix1*n : Matrix times_mat(int times,Matrix mid1)
- [x] Matrix1's Transposition : Matrix get_T(Matrix mid1)
- [x] Mul(matrix1,matrix2)
- [x] double* flatten(Matrix mid1) : Return a flattened array.
- [x] Matrix matrix_rs(Matrix mid1,int rs_row,int rs_col)
- [x] double matrix_sum(Matrix mid1)
- [x] double matrix_mean(Matrix mid1)
- [x] Matrix appply(Matrix mid1,Matrix mid2,int axis = 0)
- [x] Matrix iloc(Matrix mid1,int start_x=0,int end_x=0,int start_y=0,int end_y=0)

## Demo: mat*mat

Matrix **A**:

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 |
| ------- | ------- | ------- | ------- | ------- |
| 72.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 64.0000 | 0.0000 | 0.0000 | 0.0000 |
| 16.0000 | 8.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 56.0000 | 16.0000 | 32.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |

Matrix **B**:

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 | 第6列 |
| ------- | ------- | ------- | ------- | ------ | ------ |
| 72.0000 | 0.0000 | 16.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 64.0000 | 8.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 56.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 16.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 32.0000 | 0.0000 | 0.0000 |

To

| 第1列 | 第2列 | 第3列 | 第4列 | 第5列 | 第6列 |
| --------- | --------- | --------- | --------- | ------ | ------ |
| 5184.0000 | 0.0000 | 1152.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 4096.0000 | 512.0000 | 0.0000 | 0.0000 | 0.0000 |
| 1152.0000 | 512.0000 | 320.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 4416.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |

## Demo : mat.flatten

double* flatten(Matrix mid1)

| 1 | 2 | 3 |
| :--: | :--: | :--: |
| 2 | 4 | 6 |
| 7 | 8 | 9 |

​ To

| 1 | 2 | 3 | 2 | 4 | 6 | 7 | 8 | 9 |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | :----------------- |
| | | | | | | | | Like numpy.flatten |

## Demo : apply nearly mat

​ Matrix appply(Matrix mid1,Matrix mid2,int axis = 0)

> if axis=0 :

| 0 | 7 | 2 |
| ---- | ---- | ---- |
| 0 | 3 | 1 |
| 0 | 0 | 0 |
| 0 | 0 | 11 |
| 0 | 7 | 2 |
| 0 | 3 | 1 |
| 0 | 0 | 0 |
| 0 | 0 | 11 |
------

> axis = 1:

| 0 | 7 | 2 | 0 | 7 | 2 |
| ---- | ---- | ---- | ---- | ---- | ---- |
| 0 | 3 | 1 | 0 | 3 | 1 |
| 0 | 0 | 0 | 0 | 0 | 0 |
| 0 | 0 | 11 | 0 | 0 | 11 |



- [x] read_csv

- [x] return a matrix

CSV head :

| -0.017612 | 14.053064 | 0 |
| --------- | --------- | ---- |
| -1.395634 | 4.662541 | 1 |
| -0.752157 | 6.53862 | 0 |
| -1.322371 | 7.152853 | 0 |
| 0.423363 | 11.054677 | 0 |
| 0.406704 | 7.067335 | 1 |

Get:

![](./picture/WX20191119-105411@2x.png)


## Test demo:

```c
#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
using namespace std;
clock_t start, stop;
double duration;
int main()
{
welcome();
string path = "./new_data2.csv";
Matrix data = read_csv(path);
Matrix bais = CreateMatrix(data.row,1);
data = appply(data,bais,1);
Matrix y = iloc(data,0,0,3,4);
Matrix x_1 = iloc(data,0,0,0,3);
Matrix x_2 = get_T(x_1);
double alpha = 0.002;
int max_epoch = 100;
Matrix weight = CreateMatrix(3,1);
change_va(weight,0,0,1);
change_va(weight,1,0,1);
change_va(weight,2,0,1);
int epoch = 0;
for(epoch = 0;epoch<=max_epoch;epoch++)
{
cout<<"-----------split-line-----------"<<endl;
Matrix temp_mul = mul(x_1,weight);
Matrix h =e_sigmoid(temp_mul);
Matrix error = subtract(y,h);
Matrix temp_update = mul(x_2,error);
Matrix updata = add(weight,times_mat(alpha,temp_update),0);
cout_mat(weight);
cout<<"epoch: "<<epoch<<" error: "<<matrix_sum(error)<<endl;
cout<<"-----------split-line-----------"<<endl;
}
stop = clock();
printf("%f\n", (double)(stop - start) / CLOCKS_PER_SEC);
return 0;
}
```
Something :

> 1. Matrix'element is default 1
> 2. Dynamically allocate memory to prevent matrix from being too large
> 3. To save memory and delete later, use pointer to open up array space temporarily
> 4. if free please delete(matrix);
> 5. Api design like numpy or pandas
> 6. Talking is cheap u can get the code
> 7. welcome 🏃watched and star.
>

------



<div align = center><img src = './picture/星月.svg'></div>



个人小站:[极度空间](http://likedge.top/)

作者邮箱:zk@likedge.top | edge@ibooker.org.cn

QQ:2533524298

+ 2079
- 0
a.csv
File diff suppressed because it is too large
View File


+ 16
- 0
autodiff/dor.h View File

@@ -0,0 +1,16 @@
#ifndef DYADIC_OPERATION_RESULT
#define DYADIC_OPERATION_RESULT

// Result of a two-operand (dyadic) autodiff operation: the computed
// value together with the local partial derivatives with respect to
// the left and right operands.
struct DyadicOperationResult {
    double value;       // result of the operation
    double left_grad;   // d(value)/d(left operand)
    double right_grad;  // d(value)/d(right operand)

    DyadicOperationResult(double value, double left_grad, double right_grad)
        : value(value), left_grad(left_grad), right_grad(right_grad) {}
};

#endif /* end of include guard: DYADIC_OPERATION_RESULT */

+ 26
- 0
autodiff/graph.h View File

@@ -0,0 +1,26 @@
#ifndef GRAPH_H
#define GRAPH_H

#include <map>
#include <vector>
#include <utility>

// Singleton tape/graph used for reverse-mode automatic differentiation.
// Maps each node uid to the edges feeding into it; an edge pairs the
// local partial derivative with the uid of the contributing node.
// NOTE(review): member implementations live in graph.cpp (not shown here) —
// the per-method comments below describe the declared contract only.
class Graph {
private:
// adjacency: node uid -> list of (local gradient, source node uid)
std::map<long int, std::vector<std::pair<double, long int> > > nodes;
// single global instance (classic lazy singleton — presumably created
// by getInstance(); confirm in graph.cpp)
static Graph* instance;
Graph();

public:
// monotonically increasing counter backing uid()
static long int uid_counter;
// hand out a fresh unique id for a new Node
static long int uid();
// access the global graph instance
static Graph* getInstance();

// record an incoming edge for node `uid`
void connect(const long int& uid, const std::pair<double, long int>& edge);
// edges recorded for node `uid`
std::vector<std::pair<double, long int> > get(const long int& uid) const;
// whether node `uid` has any recorded edges
bool has(const long int& uid) const;

// discard the current recording and start a fresh tape
void new_recording();
};

#endif /* end of include guard: GRAPH_H */

+ 14
- 0
autodiff/mor.h View File

@@ -0,0 +1,14 @@
#ifndef MONADIC_OPERATION_RESULT
#define MONADIC_OPERATION_RESULT

// Result of a single-operand (monadic) autodiff operation: the computed
// value plus the local derivative with respect to the operand.
struct MonadicOperationResult {
    double value;  // result of the operation
    double grad;   // d(value)/d(operand)

    MonadicOperationResult(double value, double grad)
        : value(value), grad(grad) {}
};

#endif /* end of include guard: MONADIC_OPERATION_RESULT */

+ 72
- 0
autodiff/node.h View File

@@ -0,0 +1,72 @@
#ifndef NODE_H
#define NODE_H

#include <cmath>
#include <iostream>

#include "graph.h"
#include "mor.h"
#include "dor.h"
#include "por.h"

// A scalar participating in reverse-mode automatic differentiation.
// Each Node carries its value and a uid; dependencies between uids are
// recorded in the singleton Graph so gradients can later be propagated.
// NOTE(review): member implementations are in node.cpp (not visible here);
// std::vector arrives transitively via por.h — consider including <vector>
// directly in this header.
class Node {
private:
double value;
long int uid;

// Walks the recorded graph backwards from current_uid toward stop_uid,
// accumulating chain-rule products (implemented in node.cpp).
double gradient_recursive(Graph* graph, const long int& current_uid, const long int& stop_uid) const;

public:
Node(const double& value=0);
Node(const Node& node);

// Factories building a result Node from 1, 2, or N operands; the supplied
// function computes the value and the local gradients to record.
static Node monadic_operation(const Node& n, MonadicOperationResult (*)(const double&));
static Node dyadic_operation(const Node& l, const Node& r, DyadicOperationResult (*)(const double&, const double&));
static Node polyadic_operation(const std::vector<Node>& nodes, PolyadicOperationResult (*)(const std::vector<double>&));

// Derivative of *this with respect to a node, a vector of nodes,
// or a matrix of nodes.
double gradient(const Node& node) const;
std::vector<double> gradient(const std::vector<Node>& nodes) const;
std::vector<std::vector<double> > gradient(const std::vector<std::vector<Node> >& nodes) const;

// Arithmetic operators — presumably each records its local derivatives
// on the tape via the factories above; confirm in node.cpp.
friend Node operator+(const Node& l, const Node& r);
friend Node operator-(const Node& l, const Node& r);
friend Node operator*(const Node& l, const Node& r);
friend Node operator/(const Node& l, const Node& r);

Node& operator+=(const Node& r);
Node& operator-=(const Node& r);
Node& operator*=(const Node& r);
Node& operator/=(const Node& r);

// Comparisons (by value); note operator!= is not declared.
friend bool operator==(const Node& l, const Node& r);
friend bool operator<(const Node& l, const Node& r);
friend bool operator>(const Node& l, const Node& r);
friend bool operator<=(const Node& l, const Node& r);
friend bool operator>=(const Node& l, const Node& r);

// Differentiable math functions mirroring <cmath>.
friend Node sin(const Node& x);
friend Node cos(const Node& x);
friend Node tan(const Node& x);
friend Node sinh(const Node& x);
friend Node cosh(const Node& x);
friend Node tanh(const Node& x);
friend Node asin(const Node& x);
friend Node acos(const Node& x);
friend Node atan(const Node& x);

friend Node log(const Node& x, const Node& base);
friend Node log10(const Node& x);
friend Node ln(const Node& x);

friend Node pow(const Node& x, const Node& p);
friend Node exp(const Node& x);
friend Node sqrt(const Node& x);

friend Node abs(const Node& x);
friend Node min(const Node& l, const Node& r);
friend Node max(const Node& l, const Node& r);

// Stream output of the node's value (format defined in node.cpp).
friend std::ostream& operator<<(std::ostream& os, const Node& node);
};

#endif /* end of include guard: NODE_H */

+ 16
- 0
autodiff/por.h View File

@@ -0,0 +1,16 @@
#ifndef POLYADIC_OPERATION_RESULT
#define POLYADIC_OPERATION_RESULT

#include <vector>

// Result of an N-operand (polyadic) autodiff operation: the computed
// value plus one local gradient per operand, in operand order.
struct PolyadicOperationResult {
    double value;                   // result of the operation
    std::vector<double> gradients;  // gradients[i] = d(value)/d(operand i)

    PolyadicOperationResult(double value, const std::vector<double>& gradients)
        : value(value), gradients(gradients) {}
};

#endif /* end of include guard: POLYADIC_OPERATION_RESULT */

+ 99
- 0
autodiff/vectmath.h View File

@@ -0,0 +1,99 @@
#ifndef VECTMATH
#define VECTMATH

#include <vector>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <functional>

// dot product
template <typename T>
std::vector<std::vector<T> > dot(const std::vector<std::vector<T> >& a, const std::vector<std::vector<T> >& b){
assert(a[0].size()==b.size());

T w=0;
std::vector<std::vector<T> > result(a.size(), std::vector<T>(b[0].size()));
for (int i=0 ; i<a.size() ; i++){
for (int j=0 ; j<b[0].size() ; j++){
for (int h=0 ; h<b.size() ; h++){
w += a[i][h]*b[h][j];
}
result[i][j] = w;
w=0;
}
}

return result;
}

// operators
// In-place elementwise subtraction: u[i] -= v[i]. Sizes must match.
template <typename U, typename V>
std::vector<U>& operator-=(std::vector<U>& u, const std::vector<V>& v){
    assert(u.size()==v.size());
    auto src = v.begin();
    for (auto& elem : u){
        elem -= *src;
        ++src;
    }
    return u;
}

// Elementwise vector addition; returns a new vector. Sizes must match.
template <typename U>
std::vector<U> operator+(const std::vector<U>& u, const std::vector<U>& v){
    assert(u.size()==v.size());
    std::vector<U> out;
    out.reserve(u.size());
    for (size_t i=0 ; i<u.size() ; i++){
        out.push_back(u[i]+v[i]);
    }
    return out;
}

// Scalar-times-vector: returns { s*u[0], s*u[1], ... }.
template <typename U, typename S>
std::vector<U> operator*(const S& s, const std::vector<U>& u){
    std::vector<U> scaled(u.size());
    std::transform(u.begin(), u.end(), scaled.begin(),
                   [&s](const U& x){ return s*x; });
    return scaled;
}

// Apply `fun` to every element in place: u[i] = fun(u[i]).
template <typename U>
std::vector<U>& operator>>(U (*fun)(U&), std::vector<U>& u){
    for (auto& e : u){
        e = fun(e);
    }
    return u;
}

// Apply `fun` elementwise through one level of nesting by delegating
// each inner vector to the single-level operator>> overload.
template <typename U, typename S>
std::vector<U>& operator>>(S (*fun)(S&), std::vector<U>& u){
    for (auto it = u.begin(); it != u.end(); ++it){
        fun >> *it;
    }
    return u;
}

// Fill the vector by calling the nullary generator once per element.
template <typename U>
std::vector<U>& operator>>(U (*fun)(), std::vector<U>& u){
    std::generate(u.begin(), u.end(), fun);
    return u;
}

// Fill a nested vector: delegate each inner vector to the
// single-level generator operator>> overload.
template <typename U, typename S>
std::vector<U>& operator>>(S (*fun)(), std::vector<U>& u){
    for (auto it = u.begin(); it != u.end(); ++it){
        fun >> *it;
    }
    return u;
}

// Print a vector as "[a, b, c]" (numpy-like), comma-separated.
template <typename U>
std::ostream& operator<<(std::ostream& os, const std::vector<U>& u){
    os << "[";
    bool first = true;
    for (const auto& e : u){
        if (!first){
            os << ", ";
        }
        os << e;
        first = false;
    }
    os << "]";
    return os;
}

#endif

+ 34
- 0
conv_.cpp View File

@@ -0,0 +1,34 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/
#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include "./autodiff/node.h"
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
#include"./grad_edge/matrix_grad.h"
using namespace std;
clock_t start, stop;
double duration;
// Demo entry point for the convolution test.
int main()
{
// print the project banner (defined in welcome/score_wel.cpp)
welcome();
// Exercise conv_test on a random 5x6 matrix. Per the conv_test signature
// documented in the README, the arguments are: input matrix, input_dim=3,
// output_channels=4, stride=1, kernel_size=2, mode=0, padding=0 —
// NOTE(review): confirm against the definition in grad_edge/matrix_grad.h.
conv_test(CreateRandMat(5,6),3,4,1,2,0,0);
return 0;

}

+ 1800
- 0
data/data.csv
File diff suppressed because it is too large
View File


+ 100
- 0
data/nerual_data.csv View File

@@ -0,0 +1,100 @@
-0.017612,14.053064,0
-1.395634,4.662541,1
-0.752157,6.53862,0
-1.322371,7.152853,0
0.423363,11.054677,0
0.406704,7.067335,1
0.667394,12.741452,0
-2.46015,6.866805,1
0.569411,9.548755,0
-0.026632,10.427743,0
0.850433,6.920334,1
1.347183,13.1755,0
1.176813,3.16702,1
-1.781871,9.097953,0
-0.566606,5.749003,1
0.931635,1.589505,1
-0.024205,6.151823,1
-0.036453,2.690988,1
-0.196949,0.444165,1
1.014459,5.754399,1
1.985298,3.230619,1
-1.693453,-0.55754,1
-0.576525,11.778922,0
-0.346811,-1.67873,1
-2.124484,2.672471,1
1.217916,9.597015,0
-0.733928,9.098687,0
-3.642001,-1.618087,1
0.315985,3.523953,1
1.416614,9.619232,0
-0.386323,3.989286,1
0.556921,8.294984,1
1.224863,11.58736,0
-1.347803,-2.406051,1
1.196604,4.951851,1
0.275221,9.543647,0
0.470575,9.332488,0
-1.889567,9.542662,0
-1.527893,12.150579,0
-1.185247,11.309318,0
-0.445678,3.297303,1
1.042222,6.105155,1
-0.618787,10.320986,0
1.152083,0.548467,1
0.828534,2.676045,1
-1.237728,10.549033,0
-0.683565,-2.166125,1
0.229456,5.921938,1
-0.959885,11.555336,0
0.492911,10.993324,0
0.184992,8.721488,0
-0.355715,10.325976,0
-0.397822,8.058397,0
0.824839,13.730343,0
1.507278,5.027866,1
0.099671,6.835839,1
-0.344008,10.717485,0
1.785928,7.718645,1
-0.918801,11.560217,0
-0.364009,4.7473,1
-0.841722,4.119083,1
0.490426,1.960539,1
-0.007194,9.075792,0
0.356107,12.447863,0
0.342578,12.281162,0
-0.810823,-1.466018,1
2.530777,6.476801,1
1.296683,11.607559,0
0.475487,12.040035,0
-0.783277,11.009725,0
0.074798,11.02365,0
-1.337472,0.468339,1
-0.102781,13.763651,0
-0.147324,2.874846,1
0.518389,9.887035,0
1.015399,7.571882,0
-1.658086,-0.027255,1
1.319944,2.171228,1
2.056216,5.019981,1
-0.851633,4.375691,1
-1.510047,6.061992,0
-1.076637,-3.181888,1
1.821096,10.28399,0
3.01015,8.401766,1
-1.099458,1.688274,1
-0.834872,-1.733869,1
-0.846637,3.849075,1
1.400102,12.628781,0
1.752842,5.468166,1
0.078557,0.059736,1
0.089392,-0.7153,1
1.825662,12.693808,0
0.197445,9.744638,0
0.126117,0.922311,1
-0.679797,1.22053,1
0.677983,2.556666,1
0.761349,10.693862,0
-2.168791,0.143632,1
1.38861,9.341997,0
0.317029,14.739025,0

+ 100
- 0
data/new_data2.csv View File

@@ -0,0 +1,100 @@
1,-0.017612,14.053064,0
1,-1.395634,4.662541,1
1,-0.752157,6.53862,0
1,-1.322371,7.152853,0
1,0.423363,11.054677,0
1,0.406704,7.067335,1
1,0.667394,12.741452,0
1,-2.46015,6.866805,1
1,0.569411,9.548755,0
1,-0.026632,10.427743,0
1,0.850433,6.920334,1
1,1.347183,13.1755,0
1,1.176813,3.16702,1
1,-1.781871,9.097953,0
1,-0.566606,5.749003,1
1,0.931635,1.589505,1
1,-0.024205,6.151823,1
1,-0.036453,2.690988,1
1,-0.196949,0.444165,1
1,1.014459,5.754399,1
1,1.985298,3.230619,1
1,-1.693453,-0.55754,1
1,-0.576525,11.778922,0
1,-0.346811,-1.67873,1
1,-2.124484,2.672471,1
1,1.217916,9.597015,0
1,-0.733928,9.098687,0
1,-3.642001,-1.618087,1
1,0.315985,3.523953,1
1,1.416614,9.619232,0
1,-0.386323,3.989286,1
1,0.556921,8.294984,1
1,1.224863,11.58736,0
1,-1.347803,-2.406051,1
1,1.196604,4.951851,1
1,0.275221,9.543647,0
1,0.470575,9.332488,0
1,-1.889567,9.542662,0
1,-1.527893,12.150579,0
1,-1.185247,11.309318,0
1,-0.445678,3.297303,1
1,1.042222,6.105155,1
1,-0.618787,10.320986,0
1,1.152083,0.548467,1
1,0.828534,2.676045,1
1,-1.237728,10.549033,0
1,-0.683565,-2.166125,1
1,0.229456,5.921938,1
1,-0.959885,11.555336,0
1,0.492911,10.993324,0
1,0.184992,8.721488,0
1,-0.355715,10.325976,0
1,-0.397822,8.058397,0
1,0.824839,13.730343,0
1,1.507278,5.027866,1
1,0.099671,6.835839,1
1,-0.344008,10.717485,0
1,1.785928,7.718645,1
1,-0.918801,11.560217,0
1,-0.364009,4.7473,1
1,-0.841722,4.119083,1
1,0.490426,1.960539,1
1,-0.007194,9.075792,0
1,0.356107,12.447863,0
1,0.342578,12.281162,0
1,-0.810823,-1.466018,1
1,2.530777,6.476801,1
1,1.296683,11.607559,0
1,0.475487,12.040035,0
1,-0.783277,11.009725,0
1,0.074798,11.02365,0
1,-1.337472,0.468339,1
1,-0.102781,13.763651,0
1,-0.147324,2.874846,1
1,0.518389,9.887035,0
1,1.015399,7.571882,0
1,-1.658086,-0.027255,1
1,1.319944,2.171228,1
1,2.056216,5.019981,1
1,-0.851633,4.375691,1
1,-1.510047,6.061992,0
1,-1.076637,-3.181888,1
1,1.821096,10.28399,0
1,3.01015,8.401766,1
1,-1.099458,1.688274,1
1,-0.834872,-1.733869,1
1,-0.846637,3.849075,1
1,1.400102,12.628781,0
1,1.752842,5.468166,1
1,0.078557,0.059736,1
1,0.089392,-0.7153,1
1,1.825662,12.693808,0
1,0.197445,9.744638,0
1,0.126117,0.922311,1
1,-0.679797,1.22053,1
1,0.677983,2.556666,1
1,0.761349,10.693862,0
1,-2.168791,0.143632,1
1,1.38861,9.341997,0
1,0.317029,14.739025,0

+ 107
- 0
data_struct/data_struct_pro.c View File

@@ -0,0 +1,107 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20201213
*/
#include "data_struct_pro.h"
#define random(x) (rand()%x)
// Default constructor.
// Fix: the original left `len` and `link_list_head` uninitialized, so any
// traversal of a default-constructed list read garbage / followed a wild
// pointer. Initialize a well-defined empty state (debug print preserved).
link_list::link_list()
{
    cout<<"test"<<endl;
    this->len = 0;
    this->link_list_head = NULL;
}


//init function on link_list
//n:len
//init function on link_list
//n: number of payload nodes; the head node itself also carries data, so the
//list ends up with n+1 nodes (head holds value 0, matching list_ergodic's
//`i <= len` traversal).
// Fixes over the original:
//  - removed a malloc'd node allocated before the loop and never used (leak)
//  - the final node's data/next_index were left uninitialized; they are now
//    zeroed / NULL-terminated so traversal cannot follow a wild pointer
link_list::link_list(int n)
{
    this->len = n;
    this->link_list_head = (lnode*)malloc(sizeof(lnode));
    lnode *p = this->link_list_head;
    for(int index_link_node = 0; index_link_node < n; index_link_node++)
    {
        lnode *fresh = (lnode*)malloc(sizeof(lnode));
        p->next_index = fresh;
        p->data = index_link_node;
        cout<<p->data<<endl;        // debug: value stored in the current node
        cout<<p->next_index<<endl;  // debug: address of the next node
        p = p->next_index;
    }
    // terminate the list: the trailing node carries no payload yet
    p->data = 0;
    p->next_index = NULL;
}
//travel around the link_list
//travel around the link_list
// Prints every node reachable from the head.
// NOTE(review): the loop runs len+1 times (i <= len) because link_list(int)
// stores payload in the head node itself plus `len` appended nodes — confirm
// against the constructor before "fixing" the bound.
void list_ergodic(link_list link_list_ergodic)
{
cout<<"len:"<<link_list_ergodic.len<<endl;
lnode *ldir_head=link_list_ergodic.link_list_head;
for(int i = 0;i<=link_list_ergodic.len;i++){
cout<<"list["<<i<<"]:"<<ldir_head->data<<endl;
ldir_head=ldir_head->next_index;
}
}
//insert element for what u input,in this index
//insert element for what u input,in this index
// Splices a new node carrying `to_insertele` after position index_toinsert-1.
// NOTE(review): an index past the end is silently ignored, and `len` is not
// updated (the list is passed BY VALUE, so a member update would be lost
// anyway) — confirm both behaviours are intended.
void insert_element_ll(int index_toinsert,link_list ll_toinsert,int to_insertele)
{
lnode *insert_p=ll_toinsert.link_list_head;
for(int index_move = 0;index_move<ll_toinsert.len;index_move++){

if(index_move==index_toinsert-1)
{
// splice: insert_p -> node1 -> (old successor)
lnode *node1=(lnode*)malloc(sizeof(lnode));
lnode *old_next_index=insert_p->next_index;
insert_p->next_index=node1;
insert_p->next_index->data=to_insertele;
node1->next_index=old_next_index;

}
insert_p=insert_p->next_index;
}
}
//delete the element from where you index
void dele_element_ll(int index_todele,link_list ll_todele)
{
lnode *dele_p=ll_todele.link_list_head;
for(int index_move=0;index_move<ll_todele.len;index_move++)
{
if(index_move==index_todele-1)
{
lnode *node1=(lnode*)malloc(sizeof(lnode));
lnode *old_index_to_dele=dele_p->next_index;
dele_p->next_index=old_index_to_dele->next_index;
}
dele_p=dele_p->next_index;
}

}
void insert_end_ll(int to_insert_end,link_list ll_toinsert_end)
{

lnode *end_p=ll_toinsert_end.link_list_head;
for(int index_move_toend=0;index_move_toend<=ll_toinsert_end.len;index_move_toend++)
{
cout<<index_move_toend<<endl;
if(index_move_toend==ll_toinsert_end.len)
{
lnode *node_to_insert_end=(lnode*)malloc(sizeof(lnode));
node_to_insert_end->data=to_insert_end;
cout<<index_move_toend<<endl;
end_p->next_index=node_to_insert_end;
}
end_p=end_p->next_index;
}
ll_toinsert_end.len+=1;
cout<<ll_toinsert_end.len<<endl;
//insert element to end of link_list

}


+ 47
- 0
data_struct/data_struct_pro.h View File

@@ -0,0 +1,47 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20201213

Data_struct_define
link_list: ll
graph


---------------------------------------------------------------
if you have the better answer on it , it is nothing, just test~
---------------------------------------------------------------

*/
#ifndef DATA_STRUCT_PRO
#define DATA_STRUCT_PRO
// #pragma once

// Singly linked list node.
struct lnode{
    int data;           // payload value
    lnode *next_index;  // next node, or NULL at the tail
    // Constructor.
    // Fix: the original took `next` BY VALUE and stored the address of that
    // parameter, so next_index always pointed at a dead stack temporary
    // after the call returned. Taking a pointer (default NULL) stores a
    // pointer that can outlive the call, and literal-0 call sites such as
    // lnode(1,0) still compile (0 is a null pointer constant).
    lnode(int n, lnode *next = NULL)
    {
        next_index = next;
        data = n;
    }
};
// Simple singly linked list wrapper.
// NOTE(review): the four operations below are declared as member functions,
// but data_struct_pro.c defines them as FREE functions taking the list as a
// parameter (insert_element_ll even takes an extra value argument there).
// These member declarations appear to have no definitions — reconcile.
class link_list{
public:
int len;                // number of payload nodes (head node excluded)
lnode *NODE_link_list;  // NOTE(review): appears unused in data_struct_pro.c
lnode *link_list_head;  // first node; the constructor stores payload here too
link_list();
link_list(int n);
void list_ergodic(link_list link_list_ergodic);
void insert_element_ll(int index_toinsert,link_list ll_toinsert);
void dele_element_ll(int index_todele,link_list ll_todele);
void insert_end_ll(int to_insert_end,link_list ll_toinsert_end);
};
#endif

+ 93
- 0
data_struct_test.cpp View File

@@ -0,0 +1,93 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20201213
*/

#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include"./autodiff/node.h"
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
#include"./grad_edge/matrix_grad.h"
#include"./data_struct/data_struct_pro.h"
#include"./data_struct/data_struct_pro.c"
using namespace std;
int a[101],n;//定义全局变量,这两个变量需要在子函数中使用
int i,j,t;
//quick_sort
//quick_sort
// In-place quicksort of the global array a[left..right] (1-based usage from
// main). Uses the leftmost element as the pivot; the inner scans must start
// from the RIGHT so that `i` ends on a value <= pivot before the swap-back.
void quicksort(int left,int right)
{
int i,j,t,temp;
if(left>right)
return;

temp=a[left]; //temp holds the pivot value
i=left;
j=right;
while(i!=j)
{
//order matters: scan from the right first
while(a[j]>=temp && i<j)
j--;
//then scan from the left
while(a[i]<=temp && i<j)
i++;
//swap the two out-of-place elements
if(i<j)
{
t=a[i];
a[i]=a[j];
a[j]=t;
}
}
//finally move the pivot into its sorted position
a[left]=a[i];
a[i]=temp;

quicksort(left,i-1);//recurse on the left partition
quicksort(i+1,right);//recurse on the right partition
}
// Exercises the linked-list API on a 5-element list, then runs quicksort on
// numbers read from stdin.
int main()
{
// Node_link_list c(3);
int len=10;
// build a 5-element list
link_list list1 = link_list(5);
// traverse
list_ergodic(list1);
cout<<"----------insert element------"<<endl;
insert_element_ll(1,list1,5);
list_ergodic(list1);
cout<<"-------delement_ll---------"<<endl;
// NOTE(review): index 119 is far past the end — dele_element_ll silently
// ignores out-of-range indices, so this is effectively a no-op test
dele_element_ll(119,list1);
list_ergodic(list1);
cout<<"----------------end insert------------------"<<endl;
insert_end_ll(9,list1);
list_ergodic(list1);
// return 1;
//quicksort

// read the input: n, then n integers (stored 1-based in the global a[])
scanf("%d",&n);
for(i=1;i<=n;i++)
scanf("%d",&a[i]);
quicksort(1,n); // sort a[1..n]
// print the sorted sequence
for(i=1;i<=n;i++)
printf("%d ",a[i]);
getchar();getchar();
return 0;
}

+ 196
- 0
file_pro/data_read.h View File

@@ -0,0 +1,196 @@
#include"../matrix/matrix_pro.h"
#include"../matrix/matrix_def.h"
#include<iostream>
#include<fstream>
#include<string>
#include<typeinfo>
#include<cstring>
using namespace std;
string data;
// Parse a decimal string of the form "[+-]ddd[.ddd]" into a double.
// Mirrors the original hand-rolled parser exactly: no exponent support and
// no validation — every non-'.' character is treated as a digit.
double str2double(char *src)
{
    double value = 0, sign = 1;
    char *cur = src;
    if(*cur == '+'){ sign = 1; cur++; }
    else if(*cur == '-'){ sign = -1; cur++; }
    // integer part: accumulate until '.' or end of string
    for( ; *cur && *cur != '.'; cur++)
    {
        value = value*10 + (*cur - '0');
    }
    // fractional part: each digit contributes digit * 10^-k
    if(*cur == '.')
    {
        double scale = 0.1;
        for(cur++ ; *cur ; cur++)
        {
            value += scale*(*cur - '0');
            scale /= 10;
        }
    }
    return value*sign;
}

// Parse a comma-separated text file into a str_Matrix of raw cell strings.
// Pass 1 counts rows and the comma count of the LAST line read; pass 2
// re-opens the file and splits each line on ','.
// NOTE(review) — hazards to confirm before reuse:
//  - while(!eof()) combined with `infile >> data` can process the final
//    token twice when the file ends with a newline;
//  - the matrix is hard-coded to 1666x1666 regardless of the counted size;
//  - (char*)base.data() stores a pointer into a LOCAL std::string that is
//    reassigned each iteration and destroyed at loop exit — the stored
//    char* values dangle. read_csv avoids this by converting immediately.
str_Matrix read_file(string &file_path)
{
int count_rows = 0,count_col = 0;
ifstream infile;
infile.open(file_path);
// pass 1: count rows and commas per line
while(!infile.eof())
{
count_col = 0;
count_rows+=1;
infile >> data;
int iSize = data.size();
int flag_if = 1;
for(int i = 0;i < iSize; i++)
{
if(data[i]==',')
{
count_col++;
}
}
}
cout<<count_rows<<endl;
cout<<count_col+1<<endl;
str_Matrix data_ma = CreateStr_Ma(1666,1666);
ifstream infile2;
infile2.open(file_path);
int next_flag = 0;
// pass 2: split each line on ',' and store the cell text
while(!infile2.eof())
{
infile2 >> data;
string base = "";
string added ="";
string added2 = "";
string base2 = "";
int iSize = data.size();
int flag_if = 1;
int count_times = 0;
int count_times2 = 0;
for(int i = 0;i < iSize; i++)
{
//cout<<data[i]<<endl;
if(data[i]!=',')
{
added+=data[i];
}
else if(data[i]==',')
{
count_times+=1;
base = added;
char *result = (char*)base.data();
//cout<<result<<endl;
data_ma.str_matrix[next_flag][count_times-1] = result;
added ="";
base = "";
}
// after the last comma, accumulate the trailing cell character by character
if(count_times==count_col)
{
added2+=data[i+1];
base2 = added2;
char *result2 = (char*)base2.data();
//cout<<result2<<endl;
data_ma.str_matrix[next_flag][count_times] = result2;
}
}
next_flag+=1;
}
return data_ma;
}
// Parse a comma-separated numeric file into a Matrix of doubles.
// Pass 1 counts rows and columns (commas of the LAST line read); pass 2
// splits each line on ',' and converts every cell with str2double.
// NOTE(review): while(!eof()) with `>>` can re-process the last token on
// files ending in a newline, and count_col reflects only the final line —
// confirm input files are rectangular (the data/*.csv files here are).
Matrix read_csv(string &file_path)
{
int count_rows = 0,count_col = 0;
ifstream infile;
infile.open(file_path);
// pass 1: determine the matrix dimensions
while(!infile.eof())
{
count_col = 0;
count_rows+=1;
infile >> data;
int iSize = data.size();
int flag_if = 1;
for(int i = 0;i < iSize; i++)
{
if(data[i]==',')
{
count_col++;
}
}
}
Matrix data_ma = CreateMatrix(count_rows,count_col+1);
ifstream infile2;
infile2.open(file_path);
int next_flag = 0;
// pass 2: split each line and convert each cell to double
while(!infile2.eof())
{
infile2 >> data;
string base = "";
string added ="";
string added2 = "";
string base2 = "";
int iSize = data.size();
int flag_if = 1;
int count_times = 0;
int count_times2 = 0;
for(int i = 0;i < iSize; i++)
{
if(data[i]!=',')
{
added+=data[i];
}
else if(data[i]==',')
{
count_times+=1;
base = added;
char *result = (char*)base.data();
data_ma.matrix[next_flag][count_times-1] = str2double(result);
added ="";
base = "";
}
// after the last comma, accumulate and convert the trailing cell
if(count_times==count_col)
{
added2+=data[i+1];
base2 = added2;
char *result2 = (char*)base2.data();
data_ma.matrix[next_flag][count_times] = str2double(result2);
}
}
next_flag+=1;
}
return data_ma;
}
// Append matrix `mid1` to the text file at `path`: one header line first,
// then one line per row with every value followed by `delimiter` (including
// the last column — this trailing delimiter matches the original format).
// Always returns 0.
int save_txt(Matrix mid1,string path = "./",string delimiter = ",",string header="./")
{
    ofstream fout(path,ios::app);
    fout<<header<<endl;
    for(int row_idx = 0; row_idx < mid1.row; row_idx++)
    {
        for(int col_idx = 0; col_idx < mid1.col; col_idx++)
        {
            fout<<mid1.matrix[row_idx][col_idx]<<delimiter;
        }
        fout<<endl;
    }
    fout.close();
    return 0;
}
/*
int read_file(string path = "./",string delimiter = ",",string header = "./")
{
ystring list_word[]=[];
string *p = list_word;
for(int i = 0;i<5;i++)
{
*(list_word+i) = 1;
cout<<"*p("<<i<<"): ";
cout<<*(p+i)<<endl;
}
}
*/
//-----------split------------
//
//

+ 87
- 0
grad_edge/matrix_grad.h View File

@@ -0,0 +1,87 @@
#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include"./autodiff/node.h"
#include"../matrix/matrix_def.h"
#include"../matrix/matrix_pro.h"
using namespace std;
// Logistic sigmoid: 1 / (1 + e^(-x)), mapping R -> (0, 1).
// Fix: the original computed 1/(1+exp(x)), which is sigmoid(-x) — it was
// inconsistent with the Node overload below (1/(1+e^-z)) and would flip
// every activation (sigmoid(+inf) -> 0 instead of 1).
float sigmoid(float x){
    return 1/(1+exp(-x));
}
// Logistic sigmoid for autodiff Nodes: 1/(1 + e^-z), written via 1/exp(z)
// using the operators the Node API exposes (see autodiff/node.h).
Node sigmoid(Node z){
Node sigmoid = 1/(1+(1/exp(z)));
return sigmoid;
}
// A hand-rolled fully connected layer over the project's Matrix type, with
// manual forward/backward passes whose derivatives come from autodiff Nodes.
struct edge_network
{
// NOTE(review): the constructor parameters are unused — confirm whether
// weight allocation was meant to happen here.
edge_network(int input, int num_neuron){
}
// Forward pass with sigmoid activation:
//   output = e_sigmoid(weights · data + bais)
Matrix forward(Matrix data,Matrix weights,Matrix bais)
{
// cout<<"data: ";
// cout_mat(data);
// cout<<"weights: "<<endl;
// cout_mat(weights);
Matrix output = mul(weights,data);
output = add(output,bais,0);
Matrix output1 = e_sigmoid(output);
// cout<<"----------forward data--------"<<endl;
// cout_mat(data);
// cout<<"---------weight1---------"<<endl;
// cout_mat(weights);
// cout<<"---------bais----------"<<endl;
// cout_mat(bais);
// cout<<"---------output--------"<<endl;
// cout_mat(output1);
return output1;
}
// Forward pass WITHOUT the activation: returns the pre-activation z, as
// needed by the backward passes below.
Matrix forward_without_act(Matrix data,Matrix weights,Matrix bais)
{
Matrix output = mul(weights,data);
output = add(output,bais,0);
// Matrix output1 = e_sigmoid(output);
// cout<<"----------forward data--------"<<endl;
// cout_mat(data);
// cout<<"---------weight1---------"<<endl;
// cout_mat(weights);
// cout<<"---------bais----------"<<endl;
// cout_mat(bais);
// cout<<"---------output--------"<<endl;
// cout_mat(output)
return output;
}
// Hidden-layer backward step: grad = (weights · grad_next) ⊙ p_'(z),
// where p_'(z) is evaluated per element via autodiff.
// NOTE(review): output_before is overwritten in place with p_'(z) — it is
// passed by value here, so the caller's matrix may or may not share
// storage depending on Matrix's copy semantics; confirm in matrix_def.h.
Matrix backward(Matrix grad_next, Matrix output_before,Matrix weights,Node p_(Node))
{
for(int index = 0;index<output_before.row;index++)
{
Node z = output_before.matrix[index][0];
Node anyone = p_(z);
change_va(output_before,index,0,anyone.gradient(z));
}
return mul_simple(mul(weights,grad_next),output_before);
}
// Output-layer backward step: d(loss)/d(activation) ⊙ d(activation)/dz,
// computed per output unit with autodiff.
// NOTE(review): the activation a13 is hard-coded to the sigmoid
// 1/(1+e^-z) even though act_fun is also passed in — confirm the two are
// meant to be the same function.
Matrix end_layer_backward(Matrix label,Matrix acti_val,Node loss_fun(Node,Node),Node act_fun(Node))
{
Matrix loss_act = CreateMatrix(acti_val.row,acti_val.col);
Matrix act_output = CreateMatrix(acti_val.row,acti_val.col);
int index_x,index_y;
for(index_x=0;index_x<loss_act.row;index_x++)
{
Node t1 = label.matrix[index_x][0],z31 =acti_val.matrix[index_x][0];
Node a13 = 1/(1+(1/exp(z31)));
Node loss = loss_fun(t1,a13);
cout<<"loss:"<<loss<<endl;
Node act = act_fun(z31);
act_output.matrix[index_x][0] = act.gradient(z31);
loss_act.matrix[index_x][0] = loss.gradient(a13);
}
Matrix mid_grad_end = mul_simple(loss_act,act_output);
cout_mat(mid_grad_end);
return mid_grad_end;
}
};



BIN
image-20200128154352842.png View File

Before After
Width: 1430  |  Height: 752  |  Size: 174 kB

BIN
image-20200418210521131.png View File

Before After
Width: 1190  |  Height: 200  |  Size: 7.2 kB

+ 62
- 0
install_diff/Makefile View File

@@ -0,0 +1,62 @@
# Top-level build for the autodiff library and its example programs.
# Each example target builds the core objects (node.o, graph.o) and the
# example's own object, then links a binary into bin/.
CXX = g++
BIN = bin
LIB = lib
LIB_NAME = autodiff
OBJS_GRADIENT_DESCENT = root/obj/node.o root/obj/graph.o examples/obj/gradient_descent.o
OBJS_ANN = root/obj/node.o root/obj/graph.o examples/obj/ann.o
OBJS_GRADIENT = root/obj/node.o root/obj/graph.o examples/obj/gradient.o
OBJS_SPEED = root/obj/node.o root/obj/graph.o examples/obj/speed.o
OBJS_SIMPLE = root/obj/node.o root/obj/graph.o examples/obj/simple.o

all : gradient_descent ann gradient speed simple

# Example binaries: delegate object builds to the sub-Makefiles, then link.
gradient_descent : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/gradient_descent.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/gradient_descent $(OBJS_GRADIENT_DESCENT) $(LIBS)

ann : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/ann.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/ann $(OBJS_ANN) $(LIBS)

gradient : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/gradient.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/gradient $(OBJS_GRADIENT) $(LIBS)

speed : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/speed.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/speed $(OBJS_SPEED) $(LIBS)

simple : $(BIN) root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/simple.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/simple $(OBJS_SIMPLE) $(LIBS)

# Create output directories on demand.
$(BIN) :
if [ ! -d $(BIN) ]; then mkdir $(BIN); fi

$(LIB) :
if [ ! -d $(LIB) ]; then mkdir $(LIB); fi

clean :
$(MAKE) -C root clean
$(MAKE) -C examples clean
if [ -d $(BIN) ]; then rm $(BIN) -r; fi

# install: archive the core objects into lib/libautodiff.a, then copy the
# archive and the public headers into /usr/local (requires sudo).
install : $(LIB)
ar rcs $(LIB)/lib$(LIB_NAME).a root/obj/graph.o root/obj/node.o
if [ ! -d /usr/local/include/$(LIB_NAME) ]; then sudo mkdir /usr/local/include/$(LIB_NAME); fi
sudo cp $(LIB)/lib$(LIB_NAME).a /usr/local/lib
sudo cp root/include/*.h /usr/local/include/$(LIB_NAME)

.PHONY : all
.PHONY : gradient_descent
.PHONY : ann
.PHONY : gradient
.PHONY : speed
.PHONY : simple
.PHONY : clean
.PHONY : install

BIN
install_diff/bin/ann View File


BIN
install_diff/bin/gradient View File


BIN
install_diff/bin/gradient_descent View File


BIN
install_diff/bin/simple View File


BIN
install_diff/bin/speed View File


+ 31
- 0
install_diff/examples/Makefile View File

@@ -0,0 +1,31 @@
# Builds the example object files into obj/; each depends on the public
# autodiff headers so edits there trigger a rebuild.
CXX = g++
ODIR = obj
CXXFLAGS = -std=c++11 -O3
OBJS = $(ODIR)/gradient_descent.o $(ODIR)/ann.o $(ODIR)/gradient.o $(ODIR)/speed.o $(ODIR)/simple.o

all : $(ODIR) $(OBJS)

$(ODIR)/gradient_descent.o : src/gradient_descent.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/ann.o : src/ann.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/gradient.o : src/gradient.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/speed.o : src/speed.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/simple.o : src/simple.cpp ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR) :
if [ ! -d $(ODIR) ]; then mkdir $(ODIR); fi

clean :
if [ -d $(ODIR) ]; then rm $(ODIR) -r; fi

.PHONY : all
.PHONY : clean


BIN
install_diff/examples/obj/ann.o View File


BIN
install_diff/examples/obj/gradient.o View File


BIN
install_diff/examples/obj/gradient_descent.o View File


BIN
install_diff/examples/obj/simple.o View File


BIN
install_diff/examples/obj/speed.o View File


+ 126
- 0
install_diff/examples/src/ann.cpp View File

@@ -0,0 +1,126 @@
#include <iostream>
#include <ctime>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

typedef std::vector<Node> Vector;
typedef std::vector<Vector> Matrix;

// Uniform random Node in [0, 1] (uses the C rand(); seed with srand first).
Node random_number(){
return rand()/(double)RAND_MAX;
}

// Hyperbolic tangent built from Node primitives: (1-e^-2x)/(1+e^-2x).
Node tan_h(Node& x){
return (1-exp(-2*x))/(1+exp(-2*x));
}

// Squared-error loss over one sample.
// NOTE(review): despite the name this is the SUM of squared errors, not the
// mean (no division by y_true.size()); only the gradient's scale differs,
// but confirm before reusing it as a reported metric.
Node mean_square_error(Vector& y_true, Vector& y_pred){
Node loss;
for(size_t i=0 ; i<y_true.size() ; i++){
loss += pow(y_true[i]-y_pred[i], 2);
}
return loss;
}

// One fully connected layer: output = activation(input · weights + bias).
struct Layer {
Matrix weights;             // input_shape x output_shape
Matrix bias;                // 1 x output_shape
Node (*activation)(Node&);  // elementwise activation
int input_shape;
int output_shape;

// Allocate parameters and fill them with uniform random values in [0,1]
// (`random_number >> matrix` uses the vectmath generator overload).
Layer(int input, int output, Node (*activation)(Node&)){
this->activation = activation;
this->input_shape = input;
this->output_shape = output;
weights.resize(input, Vector(output));
bias.resize(1, Vector(output));
random_number >> weights;
random_number >> bias;
}

// Forward pass for a 1 x input_shape row matrix.
Matrix forward(Matrix& previous){
Matrix output = dot(previous, weights) + bias;
output = activation >> output;
return output;
}

// Gradient-descent step: move every parameter against its gradient.
void backward(Node& loss, const float& learning_rate){
weights -= learning_rate*loss.gradient(weights);
bias -= learning_rate*loss.gradient(bias);
}
};

// A minimal sequential feed-forward network of fully connected Layers,
// trained sample-by-sample with gradient descent on the autodiff tape.
struct Network {
    std::vector<Layer> layers;
    int input_shape;   // size of the input vector
    Graph* graph;      // global autodiff tape (singleton)

    Network(){
        graph = Graph::getInstance();
    }

    // Declare the input size; must be called before the first add().
    void input_layer(int input_shape){
        this->input_shape = input_shape;
    }

    // Append a dense layer; its input size comes from the previous layer.
    void add(int output_shape, Node (*activation)(Node&)){
        int input = layers.empty()?input_shape:layers.back().output_shape;
        layers.push_back(Layer(input, output_shape, activation));
    }

    // Forward-propagate each sample (row of `input`) through every layer.
    Matrix run(Matrix& input){
        Matrix output(input.size());
        for(size_t j=0 ; j<input.size() ; j++){
            Matrix out = {input[j]};
            for(auto& lay : layers){
                out = lay.forward(out);
            }
            output[j] = out[0];
        }
        return output;
    }

    // Stochastic gradient descent, one sample at a time, for `epochs` passes.
    // Fixes over the original: removed the unused local `int p=0;` and made
    // the epoch counter an int to avoid a signed/unsigned comparison with
    // the int `epochs` parameter.
    void fit(Matrix& input, Matrix& output, Node (*loss_function)(Vector&, Vector&), int epochs, float learning_rate){
        for(int i=0 ; i<epochs ; i++){
            std::cout << "\r" << i+1 << "/" << epochs;
            for(size_t j=0 ; j<input.size() ; j++){
                // compute input
                Matrix out = {input[j]};
                for(auto& lay : layers){
                    out = lay.forward(out);
                }

                // compute loss
                Node loss = loss_function(output[j], out[0]);

                // update parameters
                for(auto& lay : layers){
                    lay.backward(loss, learning_rate);
                }

                // start a fresh tape so recordings do not grow across samples
                graph->new_recording();
            }
        }
        std::cout << std::endl;
    }
};

// Trains a 2-3-1 tanh network on the XOR truth table and prints its
// predictions for all four inputs.
int main(int argc, char const *argv[]) {
srand(time(NULL));

// XOR: not linearly separable, so the hidden layer is required
Matrix input = {{0,0},{0,1},{1,0},{1,1}};
Matrix output = {{0},{1},{1},{0}};

Network network;
network.input_layer(2);
network.add(3, tan_h);
network.add(1, tan_h);
network.fit(input, output, mean_square_error, 500, 0.1);

Matrix pred = network.run(input);
std::cout << pred << std::endl;
return 0;
}

+ 15
- 0
install_diff/examples/src/gradient.cpp View File

@@ -0,0 +1,15 @@
#include <iostream>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// f(x,y,z) = (x-y)^2 + x*y*z
// Fix: take the nodes by reference instead of by value. The original copied
// the whole vector on every call; the sibling gradient_descent example
// already passes by reference, and this guarantees f.gradient(x) in main
// differentiates with respect to the very same Node objects.
// NOTE(review): if Node's copy constructor preserves the tape uid, the
// original also produced correct gradients — confirm in node.cpp.
Node function(std::vector<Node>& x){
    return pow(x[0]-x[1], 2) + x[0]*x[1]*x[2]; // (x-y)^2 + x*y*z
}

// Evaluates f at (5,6,7) and prints the full gradient vector.
int main(int argc, char const *argv[]) {
std::vector<Node> x = {5,6,7};
Node f = function(x);
// gradient(vector) returns one partial derivative per input node
std::cout << "grad(f) = " << f.gradient(x) << std::endl;
return 0;
}

+ 27
- 0
install_diff/examples/src/gradient_descent.cpp View File

@@ -0,0 +1,27 @@
#include <iostream>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// f(x,y) = x^2 + y^2 — convex, with its minimum at the origin.
Node function(std::vector<Node>& x){
return pow(x[0], 2) + pow(x[1], 2); // x^2 + y^2
}

// Plain gradient descent on f(x,y) = x^2 + y^2: x <- x - lr * grad f(x).
int main(int argc, char const *argv[]) {
Graph* graph = Graph::getInstance();

// start far from the minimum
std::vector<Node> x = {50, 50};
Node f;

int epochs = 30;
float learning_rate = 0.1;
for(size_t i=0 ; i<epochs ; i++){
f = function(x);
x -= learning_rate*f.gradient(x);
graph->new_recording(); // fresh tape for the next iteration (see graph.h)
}

std::cout << "f = " << f << std::endl;
std::cout << "x = " << x << std::endl;
return 0;
}

BIN
install_diff/examples/src/ma View File


+ 13
- 0
install_diff/examples/src/simple.cpp View File

@@ -0,0 +1,13 @@
#include <iostream>
#include "../../root/include/node.h"

// Minimal demo: record f = x*y + sin(x), then query both partials.
int main(int argc, char const *argv[]) {
Node x=2, y=3;
Node f = x*y + sin(x);

std::cout << "f(x,y) = x*y + sin(x)" << std::endl;
std::cout << "f(" << x << "," << y << ") = " << f << std::endl;
std::cout << "∂f/∂x = " << f.gradient(x) << std::endl;  // = y + cos(x)
std::cout << "∂f/∂y = " << f.gradient(y) << std::endl;  // = x
return 0;
}

+ 51
- 0
install_diff/examples/src/speed.cpp View File

@@ -0,0 +1,51 @@
#include <iostream>
#include <iomanip>
#include <cassert>
#include <ctime>
#include <chrono>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// Build a height x width matrix of uniform random values in [0, 1].
// The dummy parameter `t` only lets callers select T through deduction,
// e.g. get_random_matrix(n, n, Node()).
template <class T>
std::vector<std::vector<T> > get_random_matrix(const int& height, const int& width, T t){
    std::vector<std::vector<T> > mat(height, std::vector<T>(width));
    for(size_t r = 0; r < mat.size(); r++){
        for(size_t c = 0; c < mat[r].size(); c++){
            mat[r][c] = rand()/(double)RAND_MAX;
        }
    }
    return mat;
}

// Benchmark: time a size x size matrix product with plain double versus
// autodiff Node (which records every operation on the tape).
int main(int argc, char const *argv[]) {
srand(time(0));

int size = 30;
std::vector<std::vector<double> > a = get_random_matrix(size, size, double());
std::vector<std::vector<double> > b = get_random_matrix(size, size, double());
std::vector<std::vector<Node> > c = get_random_matrix(size, size, Node());
std::vector<std::vector<Node> > d = get_random_matrix(size, size, Node());

std::cout << std::fixed;
std::cout << std::setprecision(10);

std::cout << "Running with double...\t";
std::cout.flush();
auto start = std::chrono::high_resolution_clock::now();
std::vector<std::vector<double> > ab = dot(a, b);
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl;

std::cout << "Running with Node...\t";
std::cout.flush();
start = std::chrono::high_resolution_clock::now();
std::vector<std::vector<Node> > cd = dot(c, d);
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl;

std::cout << "Yet to be improved..." << std::endl;
return 0;
}

BIN
install_diff/lib/libautodiff.a View File


+ 22
- 0
install_diff/root/Makefile View File

@@ -0,0 +1,22 @@
# Builds the autodiff core objects (graph.o, node.o) into obj/.
CXX = g++
ODIR = obj
CXXFLAGS = -std=c++11 -O3
OBJS = $(ODIR)/graph.o $(ODIR)/node.o

all : $(ODIR) $(OBJS)

$(ODIR)/graph.o : src/graph.cpp include/graph.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/node.o : src/node.cpp include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR) :
if [ ! -d $(ODIR) ]; then mkdir $(ODIR); fi

clean :
if [ -d $(ODIR) ]; then rm $(ODIR) -r; fi

.PHONY : all
.PHONY : clean


+ 16
- 0
install_diff/root/include/dor.h View File

@@ -0,0 +1,16 @@
#ifndef DYADIC_OPERATION_RESULT
#define DYADIC_OPERATION_RESULT

// Result of a two-operand operation: the computed value plus the local
// partial derivatives with respect to the left and right operands.
struct DyadicOperationResult {
    double value;
    double left_grad;
    double right_grad;

    DyadicOperationResult(double v, double lg, double rg)
        : value(v), left_grad(lg), right_grad(rg) {}
};

#endif /* end of include guard: DYADIC_OPERATION_RESULT */

+ 26
- 0
install_diff/root/include/graph.h View File

@@ -0,0 +1,26 @@
#ifndef GRAPH_H
#define GRAPH_H

#include <map>
#include <vector>
#include <utility>

// Global tape recording the computation graph for reverse-mode autodiff.
// Singleton: every Node shares the one instance returned by getInstance().
class Graph {
private:
// adjacency: node uid -> list of (local gradient, parent uid) edges
std::map<long int, std::vector<std::pair<double, long int> > > nodes;
static Graph* instance;
Graph();

public:
static long int uid_counter;  // next id to hand out (see graph.cpp)
static long int uid();        // returns uid_counter++
static Graph* getInstance();

// record an outgoing edge of `uid` carrying its local partial derivative
void connect(const long int& uid, const std::pair<double, long int>& edge);
std::vector<std::pair<double, long int> > get(const long int& uid) const;
bool has(const long int& uid) const;

// start a fresh recording — presumably clears the recorded edges between
// training iterations; confirm in graph.cpp
void new_recording();
};

#endif /* end of include guard: GRAPH_H */

+ 14
- 0
install_diff/root/include/mor.h View File

@@ -0,0 +1,14 @@
#ifndef MONADIC_OPERATION_RESULT
#define MONADIC_OPERATION_RESULT

// Result of a one-operand operation: the computed value plus the local
// derivative with respect to that operand.
struct MonadicOperationResult {
    double value;
    double grad;

    MonadicOperationResult(double v, double g) : value(v), grad(g) {}
};

#endif /* end of include guard: MONADIC_OPERATION_RESULT */

+ 72
- 0
install_diff/root/include/node.h View File

@@ -0,0 +1,72 @@
#ifndef NODE_H
#define NODE_H

#include <cmath>
#include <iostream>

#include "graph.h"
#include "mor.h"
#include "dor.h"
#include "por.h"

// A single scalar on the autodiff tape. Arithmetic between Nodes records
// edges on the global Graph (see graph.h) so gradients can later be
// accumulated by walking the tape backwards.
class Node {
private:
double value;  // numeric payload
long int uid;  // identity on the Graph tape

// accumulate the chain rule along the tape from current_uid back to stop_uid
double gradient_recursive(Graph* graph, const long int& current_uid, const long int& stop_uid) const;

public:
Node(const double& value=0);
Node(const Node& node);

// factory helpers: compute a result and record its local derivative(s)
static Node monadic_operation(const Node& n, MonadicOperationResult (*)(const double&));
static Node dyadic_operation(const Node& l, const Node& r, DyadicOperationResult (*)(const double&, const double&));
static Node polyadic_operation(const std::vector<Node>& nodes, PolyadicOperationResult (*)(const std::vector<double>&));

// d(this)/d(node) for a scalar, a vector, or a matrix of inputs
double gradient(const Node& node) const;
std::vector<double> gradient(const std::vector<Node>& nodes) const;
std::vector<std::vector<double> > gradient(const std::vector<std::vector<Node> >& nodes) const;

// differentiable arithmetic
friend Node operator+(const Node& l, const Node& r);
friend Node operator-(const Node& l, const Node& r);
friend Node operator*(const Node& l, const Node& r);
friend Node operator/(const Node& l, const Node& r);

Node& operator+=(const Node& r);
Node& operator-=(const Node& r);
Node& operator*=(const Node& r);
Node& operator/=(const Node& r);

// comparisons (presumably on the stored value — confirm in node.cpp)
friend bool operator==(const Node& l, const Node& r);
friend bool operator<(const Node& l, const Node& r);
friend bool operator>(const Node& l, const Node& r);
friend bool operator<=(const Node& l, const Node& r);
friend bool operator>=(const Node& l, const Node& r);

// differentiable math functions
friend Node sin(const Node& x);
friend Node cos(const Node& x);
friend Node tan(const Node& x);
friend Node sinh(const Node& x);
friend Node cosh(const Node& x);
friend Node tanh(const Node& x);
friend Node asin(const Node& x);
friend Node acos(const Node& x);
friend Node atan(const Node& x);

friend Node log(const Node& x, const Node& base);
friend Node log10(const Node& x);
friend Node ln(const Node& x);

friend Node pow(const Node& x, const Node& p);
friend Node exp(const Node& x);
friend Node sqrt(const Node& x);

friend Node abs(const Node& x);
friend Node min(const Node& l, const Node& r);
friend Node max(const Node& l, const Node& r);

friend std::ostream& operator<<(std::ostream& os, const Node& node);
};

#endif /* end of include guard: NODE_H */

+ 16
- 0
install_diff/root/include/por.h View File

@@ -0,0 +1,16 @@
#ifndef POLYADIC_OPERATION_RESULT
#define POLYADIC_OPERATION_RESULT

#include <vector>

// Result of an n-ary operation: the computed value plus one local partial
// derivative per operand, in operand order.
struct PolyadicOperationResult {
    double value;
    std::vector<double> gradients;

    PolyadicOperationResult(double v, const std::vector<double>& grads)
        : value(v), gradients(grads) {}
};

#endif /* end of include guard: POLYADIC_OPERATION_RESULT */

+ 99
- 0
install_diff/root/include/vectmath.h View File

@@ -0,0 +1,99 @@
#ifndef VECTMATH
#define VECTMATH

#include <vector>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <functional>

// dot product
template <typename T>
std::vector<std::vector<T> > dot(const std::vector<std::vector<T> >& a, const std::vector<std::vector<T> >& b){
assert(a[0].size()==b.size());

T w=0;
std::vector<std::vector<T> > result(a.size(), std::vector<T>(b[0].size()));
for (int i=0 ; i<a.size() ; i++){
for (int j=0 ; j<b[0].size() ; j++){
for (int h=0 ; h<b.size() ; h++){
w += a[i][h]*b[h][j];
}
result[i][j] = w;
w=0;
}
}

return result;
}

// operators
template <typename U, typename V>
std::vector<U>& operator-=(std::vector<U>& u, const std::vector<V>& v){
assert(u.size()==v.size());
for(size_t i=0 ; i<u.size() ; i++){
u[i] -= v[i];
}
return u;
}

template <typename U>
std::vector<U> operator+(const std::vector<U>& u, const std::vector<U>& v){
assert(u.size()==v.size());
std::vector<U> w(u.size());
for(size_t i=0 ; i<w.size() ; i++){
w[i] = u[i]+v[i];
}
return w;
}

// Scalar-times-vector: each element is s*u[i].
template <typename U, typename S>
std::vector<U> operator*(const S& s, const std::vector<U>& u){
    std::vector<U> scaled(u.size());
    std::transform(u.begin(), u.end(), scaled.begin(),
                   [&s](const U& e){ return s*e; });
    return scaled;
}

// Map `fun` over the vector in place: each element becomes fun(element).
template <typename U>
std::vector<U>& operator>>(U (*fun)(U&), std::vector<U>& u){
    for(auto& elem : u){
        elem = fun(elem);
    }
    return u;
}

// Recursive overload: lifts an element-level function over nested vectors by
// re-applying operator>> one level down; recursion terminates at the
// element-level overload above.
template <typename U, typename S>
std::vector<U>& operator>>(S (*fun)(S&), std::vector<U>& u){
    for(auto& v : u){
        fun >> v;
    }
    return u;
}

// Fill the vector from a nullary generator: each element = fun().
template <typename U>
std::vector<U>& operator>>(U (*fun)(), std::vector<U>& u){
    std::generate(u.begin(), u.end(), fun);
    return u;
}

// Recursive generator overload: pushes the nullary generator down one
// nesting level per call until the element-level overload above applies.
template <typename U, typename S>
std::vector<U>& operator>>(S (*fun)(), std::vector<U>& u){
    for(auto& v : u){
        fun >> v;
    }
    return u;
}

// Print a vector as "[a, b, c]" (empty vector prints "[]").
template <typename U>
std::ostream& operator<<(std::ostream& os, const std::vector<U>& u){
    os << "[";
    const char* sep = "";
    for(const auto& elem : u){
        os << sep << elem;
        sep = ", ";
    }
    os << "]";
    return os;
}

#endif

BIN
install_diff/root/obj/graph.o View File


BIN
install_diff/root/obj/node.o View File


+ 33
- 0
install_diff/root/src/graph.cpp View File

@@ -0,0 +1,33 @@
#include "../include/graph.h"

// Singleton storage (created lazily by getInstance) and the global uid
// counter; the first id handed out is 1.
Graph* Graph::instance = nullptr;
long int Graph::uid_counter = 1;

// Hand out the next unique node id; ids start at 1 and are never reused.
long int Graph::uid(){
    long int next = uid_counter;
    uid_counter += 1;
    return next;
}

// Lazily create and return the process-wide Graph singleton.
// NOTE(review): not thread-safe and intentionally never freed.
Graph* Graph::getInstance(){
    if(instance == nullptr){
        instance = new Graph();
    }
    return instance;
}

// Empty constructor; instances are only created lazily via getInstance().
Graph::Graph(){}

// Record one tape edge for `uid`: (local gradient, parent uid).
void Graph::connect(const long int& uid, const std::pair<double, long int>& edge){
    nodes[uid].emplace_back(edge);
}

// Copy of the edge list for `uid`; throws std::out_of_range if absent.
std::vector<std::pair<double, long int> > Graph::get(const long int& uid) const{
    const auto& edges = nodes.at(uid);
    return edges;
}

// True if any edges were recorded for `uid`.
bool Graph::has(const long int& uid) const{
    return nodes.count(uid) != 0;
}

// Drop all recorded edges and start a fresh tape. The uid counter keeps
// counting up, so old Node uids simply have no edges on the new tape.
void Graph::new_recording(){
    nodes.clear();
}

+ 275
- 0
install_diff/root/src/node.cpp View File

@@ -0,0 +1,275 @@
#include "../include/node.h"

#include <cmath>

// Build a leaf node holding `value` and register a fresh tape id for it.
Node::Node(const double& value) {
    this->value = value;
    this->uid = Graph::uid();
}

// Copying keeps the uid (no fresh id): a copy refers to the same tape node,
// so gradients keep flowing through copies of the same variable.
Node::Node(const Node& node){
    this->value = node.value;
    this->uid = node.uid;
}

// Derivative of node `current_uid` w.r.t. node `stop_uid`, read off the tape.
// Base case: a node's gradient w.r.t. itself is 1. Otherwise sum, over the
// edges recorded for current_uid, of local-gradient * parent's gradient.
// NOTE(review): shared sub-expressions are revisited on every path, so cost
// can grow exponentially with graph depth (no memoization is applied).
double Node::gradient_recursive(Graph* graph, const long int& current_uid, const long int& stop_uid) const{
    if(current_uid==stop_uid){
        return 1.0;
    }

    double sum=0.0;
    if(graph->has(current_uid)){
        for(auto& pair : graph->get(current_uid)){
            sum += pair.first*gradient_recursive(graph, pair.second, stop_uid);
        }
    }
    return sum;
}

double Node::gradient(const Node& node) const{
Graph* graph = Graph::getInstance();
return gradient_recursive(graph, this->uid, node.uid);
}

std::vector<double> Node::gradient(const std::vector<Node>& nodes) const{
Graph* graph = Graph::getInstance();
std::vector<double> grad(nodes.size());
for(size_t i=0 ; i<nodes.size() ; i++){
grad[i] = gradient_recursive(graph, this->uid, nodes[i].uid);
}
return grad;
}

std::vector<std::vector<double> > Node::gradient(const std::vector<std::vector<Node> >& nodes) const{
Graph* graph = Graph::getInstance();
std::vector<std::vector<double> > grad(nodes.size());
for(size_t i=0 ; i<nodes.size() ; i++){
grad[i].resize(nodes[i].size());
for(size_t j=0 ; j<nodes[i].size() ; j++){
grad[i][j] = gradient_recursive(graph, this->uid, nodes[i][j].uid);
}
}
return grad;
}

// Apply a unary op: `fun` returns value and local derivative; a new result
// Node is created and the edge (d result / d n, n.uid) is put on the tape.
Node Node::monadic_operation(const Node& n, MonadicOperationResult (*fun)(const double&)){
    MonadicOperationResult res = fun(n.value);
    Node result(res.value);
    Graph* graph = Graph::getInstance();
    graph->connect(result.uid, std::make_pair(res.grad, n.uid));
    return result;
}

// Apply a binary op: record one edge per operand carrying the respective
// partial derivative returned by `fun`.
Node Node::dyadic_operation(const Node& left, const Node& right, DyadicOperationResult (*fun)(const double&, const double&)){
    DyadicOperationResult res = fun(left.value, right.value);
    Node result(res.value);
    Graph* graph = Graph::getInstance();
    graph->connect(result.uid, std::make_pair(res.left_grad, left.uid));
    graph->connect(result.uid, std::make_pair(res.right_grad, right.uid));
    return result;
}

// Apply an n-ary op: gather operand values, evaluate `fun` once, then record
// one edge per operand with its partial derivative (res.gradients[i] must
// line up with nodes[i]).
Node Node::polyadic_operation(const std::vector<Node>& nodes, PolyadicOperationResult (*fun)(const std::vector<double>&)){
    std::vector<double> values(nodes.size());
    for(size_t i=0 ; i<nodes.size() ; i++){
        values[i] = nodes[i].value;
    }
    PolyadicOperationResult res = fun(values);
    Node result(res.value);
    Graph* graph = Graph::getInstance();
    for(size_t i=0 ; i<nodes.size() ; i++){
        graph->connect(result.uid, std::make_pair(res.gradients[i], nodes[i].uid));
    }
    return result;
}

// Arithmetic on Nodes: each op records its value and local partials.
Node operator+(const Node& left, const Node& right){
    // d/dl = 1, d/dr = 1
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        return DyadicOperationResult(a+b, 1.0, 1.0);
    });
}

Node operator-(const Node& left, const Node& right){
    // d/dl = 1, d/dr = -1
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        return DyadicOperationResult(a-b, 1.0, -1.0);
    });
}

Node operator*(const Node& left, const Node& right){
    // d/dl = r, d/dr = l
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        return DyadicOperationResult(a*b, b, a);
    });
}

Node operator/(const Node& left, const Node& right){
    // d/dl = 1/r, d/dr = -l/r^2
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        return DyadicOperationResult(a/b, 1.0/b, -a/(b*b));
    });
}

// Compound assignment delegates to the binary operators, so each use creates
// a fresh result node (and uid) on the tape before rebinding *this to it.
Node& Node::operator+=(const Node& r){
    *this = *this + r;
    return *this;
}

Node& Node::operator-=(const Node& r){
    *this = *this - r;
    return *this;
}

Node& Node::operator*=(const Node& r){
    *this = *this * r;
    return *this;
}

Node& Node::operator/=(const Node& r){
    *this = *this / r;
    return *this;
}

// Comparisons inspect only the nodes' current values; nothing is recorded
// on the tape.
bool operator==(const Node& left, const Node& right){
    return left.value==right.value;
}

bool operator<(const Node& left, const Node& right){
    return left.value<right.value;
}

bool operator>(const Node& left, const Node& right){
    return left.value>right.value;
}

bool operator<=(const Node& left, const Node& right){
    return left.value<=right.value;
}

bool operator>=(const Node& left, const Node& right){
    return left.value>=right.value;
}

// Trigonometric / hyperbolic primitives: each records f(n) plus the local
// derivative f'(n) via monadic_operation.

// d/dx sin x = cos x
Node sin(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::sin(n), ::cos(n));
    });
}

// d/dx cos x = -sin x
Node cos(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::cos(n), -1.0*::sin(n));
    });
}

// d/dx tan x = 1/cos^2 x
Node tan(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::tan(n), 1.0/::pow(::cos(n), 2));
    });
}

// d/dx sinh x = cosh x
Node sinh(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::sinh(n), ::cosh(n));
    });
}

// d/dx cosh x = sinh x
Node cosh(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::cosh(n), ::sinh(n));
    });
}

// d/dx asin x = 1/sqrt(1-x^2), valid for |x| < 1
Node asin(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::asin(n), 1.0/(::sqrt(1-n*n)));
    });
}

// d/dx acos x = -1/sqrt(1-x^2), valid for |x| < 1
Node acos(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::acos(n), -1.0/(::sqrt(1-n*n)));
    });
}

// d/dx atan x = 1/(1+x^2)
Node atan(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::atan(n), 1.0/(1+n*n));
    });
}

// d/dx tanh x = 1 - tanh^2 x
Node tanh(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::tanh(n), 1.0-::pow(::tanh(n), 2));
    });
}

// log base b of x: log_b(x) = ln x / ln b.
// Partials: d/dx = 1/(x ln b), d/db = -ln x / (b ln^2 b).
// Fixed: the base partial previously used -ln x / (b ln b), omitting one
// factor of ln b from the quotient-rule derivative.
Node log(const Node& x, const Node& base){
    return Node::dyadic_operation(x, base, [](const double& a, const double& b){
        double lb = ::log(b);
        return DyadicOperationResult(::log(a)/lb, 1.0/(a*lb), -1.0*::log(a)/(b*lb*lb));
    });
}

// log10(x) = ln x / ln 10; d/dx = 1/(x ln 10).
Node log10(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::log(n)/::log(10), 1.0/(n*::log(10)));
    });
}

// Natural logarithm. d/dx ln x = 1/x.
// Fixed: the derivative was previously 1/ln(x), which is wrong.
Node ln(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::log(n), 1.0/n);
    });
}

// x^p with partials d/dx = p*x^(p-1), d/dp = ln(x)*x^p.
// For x <= 0 the exponent partial is forced to 0 since ln(x) is undefined.
// NOTE(review): d/dx at x <= 0 can still yield inf/NaN for some p — confirm
// callers never hit that region.
Node pow(const Node& x, const Node& base){
    return Node::dyadic_operation(x, base, [](const double& a, const double& b){
        if(a<=0){
            return DyadicOperationResult(::pow(a,b), b*::pow(a,b-1), 0);
        }
        return DyadicOperationResult(::pow(a,b), b*::pow(a,b-1), ::log(a)*::pow(a,b));
    });
}

// e^x is its own derivative.
Node exp(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::exp(n), ::exp(n));
    });
}

// d/dx sqrt x = 1/(2 sqrt x); gradient is finite only for x > 0.
Node sqrt(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        return MonadicOperationResult(::sqrt(n), 1.0/(2*::sqrt(n)));
    });
}

// |x| with subgradient sign(x) (0 at x == 0).
// Fixed: ::abs on a double could bind to the C int overload, truncating the
// argument and risking a 0/0 division for |n| < 1; ::fabs plus a
// comparison-based sign avoids both problems.
Node abs(const Node& x){
    return Node::monadic_operation(x, [](const double& n){
        double sign = (n > 0) - (n < 0);
        return MonadicOperationResult(::fabs(n), sign);
    });
}

// min/max pass the gradient through the winning operand only; on ties both
// partials are 0, so no gradient flows through an exact tie.
Node min(const Node& left, const Node& right){
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        if(a<b){
            return DyadicOperationResult(a, 1, 0);
        }
        if(a>b){
            return DyadicOperationResult(b, 0, 1);
        }
        return DyadicOperationResult(a, 0, 0);
    });
}

Node max(const Node& left, const Node& right){
    return Node::dyadic_operation(left, right, [](const double& a, const double& b){
        if(a>b){
            return DyadicOperationResult(a, 1, 0);
        }
        if(a<b){
            return DyadicOperationResult(b, 0, 1);
        }
        return DyadicOperationResult(a, 0, 0);
    });
}

// Print only the node's value; uid/tape details are not shown.
std::ostream& operator<<(std::ostream& os, const Node& node){
    os << node.value;
    return os;
}

BIN
logistic/.logistic_def.h.swp View File


BIN
logistic/log View File


+ 20
- 0
logistic/logistic_def.cpp View File

@@ -0,0 +1,20 @@
#include<iostream>
#include"../matrix/matrix_def.cpp"
#include"../welcome/score_wel.cpp"
#include"../matrix/matrix_pro.cpp"
#include<math.h>
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/
// Smoke-test helper: prints e^2 and returns it.
// Fixed: the function was declared double but fell off the end without a
// return statement (undefined behaviour); it now returns the printed value.
double sigmoid_def()
{
    double v = std::exp(2);
    std::cout << v << std::endl;
    return v;
}

+ 35
- 0
logistic/logistic_def.h View File

@@ -0,0 +1,35 @@
#include"../matrix/matrix_def.h"
#include"../matrix/matrix_pro.h"
#include<math.h>
#include<iostream>
using namespace std;
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/
// Logistic sigmoid: 1 / (1 + e^-x), mapping any real to (0, 1).
// Fixed: the original `return 1/1+exp(0-temp);` parsed as (1/1) + e^-x
// due to operator precedence, i.e. it returned 1 + e^-x, not the sigmoid.
double get_sigmoid(double temp)
{
    return 1.0 / (1.0 + std::exp(-temp));
}
// Element-wise logistic sigmoid of a Matrix: 1/(1+e^-x) per cell, written
// into a freshly allocated result. Note the formula is computed inline
// rather than via get_sigmoid() above.
Matrix e_sigmoid(Matrix mid1)
{
    Matrix result = CreateMatrix(mid1.row,mid1.col);
    for(int index_x = 0;index_x<mid1.row;index_x++)
    {
        for(int index_y=0;index_y<mid1.col;index_y++)
        {
            // cout<<"ex:"<<get_sigmoid(mid1.matrix[index_x][index_y])<<endl;
            result.matrix[index_x][index_y]= 1.0/(1+exp(-mid1.matrix[index_x][index_y]));
        }
    }
    return result;
}


+ 96
- 0
main.cpp View File

@@ -0,0 +1,96 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/

#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include "./autodiff/node.h"
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
#include"./grad_edge/matrix_grad.h"
#include"./data_struct/data_struct_pro.h"
using namespace std;
// Timing helpers (apparently unused in this demo — TODO confirm) and the
// autodiff seed nodes used by the loss/activation functions below.
clock_t start, stop;
double duration;
Node z=1;
Node t1 = 1,a13 = 1;
/* Custom loss: squared error 0.5*(target - prediction)^2. */
Node loss_act(Node t1,Node a13)
{
    Node diff = t1 - a13;
    return 0.5*pow(diff,2);
}
/* Sigmoid 1/(1+e^-z), with e^-z formed as 1/exp(z). */
Node sigmoid_act(Node z)
{
    Node inv_exp = 1/exp(z);
    return 1/(1+inv_exp);
}
// Pluggable loss/activation hooks passed into edge_network's backward calls.
Node (*loss)(Node,Node) = loss_act;
Node (*act)(Node) = sigmoid_act;
int main()
{
    // Sanity check: (1x3 ones) * (3x1 ones) should print a single 3.
    Matrix data_1 = ones(1,3);
    Matrix data_2 = ones(3,1);
    cout_mat(mul(data_1,data_2));
    welcome();
    //begin
    cout<<"auto build on gcc"<<endl;
    cout<<"---------autodiff for neraul network-----------"<<endl;
    // Toy training set: 3x3 input of ones, random 3x1 labels.
    Matrix data_mine = ones(3,3);
    cout<<"data mine"<<endl;
    cout_mat(data_mine);
    cout<<"data mine"<<endl;
    Matrix label = CreateRandMat(3,1);
    cout_mat(label);
    // Layer-1 parameters: random 3x3 weights, bias of ones.
    Matrix weight1 = CreateRandMat(3,3);
    cout<<"weight"<<endl;
    cout_mat(weight1);
    Matrix bais1 = ones(3,1);
    cout_mat(bais1);

    // Layer-2 parameters.
    Matrix weight2 = CreateRandMat(3,3);
    Matrix bais2 = ones(3,1);
    for(int epoch = 0;epoch<10;epoch++)
    {
        cout<<"---------epoch: "<<epoch<<"------------"<<endl;
        cout_mat(weight1);
        int input_dim = 3;
        int output_dim = 3;
        // NOTE(review): the network object is rebuilt each epoch; only the
        // weight/bias matrices declared outside the loop persist.
        edge_network sequaltial(input_dim,output_dim);
        // define the network
        Matrix output1 = sequaltial.forward(data_mine,weight1,bais1);
        Matrix output1_without_act = sequaltial.forward_without_act(data_mine,weight1,bais1);
        // layer1
        Matrix output2 = sequaltial.forward(output1,weight2,bais2);
        Matrix output2_without_act = sequaltial.forward_without_act(output1,weight2,bais2);
        // layer2
        Matrix output_end = sequaltial.end_layer_backward(label,output2_without_act,*loss,*act);
        // layer3
        Matrix backward3 = sequaltial.backward(output_end,output1_without_act,weight2,*act);
        // bp: compute weight gradients, then SGD update with rate 0.001
        Matrix weight_2_grad = mul(output_end,get_T(output1));
        Matrix weight_1_grad = mul(backward3,get_T(data_mine));
        weight1 = subtract(weight1,times_mat(0.001,weight_1_grad));
        bais1 = subtract(bais1,times_mat(0.001,backward3));
        weight2 = subtract(weight2,times_mat(0.001,weight_2_grad));
        bais2 = subtract(bais2,times_mat(0.001,output_end));
        cout<<"neraul end;"<<endl;
    }
    return 0;
}

BIN
matrix/.matrix_pro.h.swp View File


+ 15
- 0
matrix/conv_test.cpp View File

@@ -0,0 +1,15 @@
#include<iostream>
#include<stdlib.h>
#include<string>
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/
int main()

+ 121
- 0
matrix/matrix_def.h View File

@@ -0,0 +1,121 @@
#ifndef MATRIX_DEF
#define MATRIX_DEF
#pragma once
#include<iostream>
#include<sys/malloc.h>
#include<stdio.h>
#include<string>
using namespace std;
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925
*/
//CreateMatrix Create a Matrix for ones;
//ex:
//2*3
//0 0 0
//0 0 0
//change the value in matrix
// Dense float matrix: row x col cells behind a malloc'd float** buffer.
// Copying a Matrix copies the pointer, not the cells, so every copy aliases
// the same storage (change_va/times_mat rely on this).
typedef struct
{
    int row,col;
    float **matrix;
}Matrix;
// String-cell counterpart used by CreateStr_Ma / cout_strmat.
typedef struct
{
    int row,col;
    string **str_matrix;
}str_Matrix;
// Build a ro x co matrix of std::string cells, each initialised to "edge".
// Fixed: the original malloc'd raw memory and assigned into unconstructed
// std::string objects — undefined behaviour; new[] constructs them properly.
// (Nothing frees the buffer, matching the rest of this file.)
str_Matrix CreateStr_Ma(int ro, int co)
{
    str_Matrix str_arr;
    string **designma = new string*[ro];
    for(int i = 0; i < ro; i++)
    {
        designma[i] = new string[co];
        for(int j = 0; j < co; j++)
        {
            designma[i][j] = "edge";
        }
    }
    str_arr.row = ro;
    str_arr.col = co;
    str_arr.str_matrix = designma;
    return str_arr;
}
// Allocate a ro x co float matrix with every cell set to 0.
// Cleanup: removed the unused `row,col` locals and merged the allocate and
// zero-fill passes into one loop.
Matrix CreateMatrix(int ro,int co)
{
    Matrix m;
    float **cells = (float**)malloc(ro*sizeof(float*));
    for(int r = 0; r < ro; r++)
    {
        cells[r] = (float*)malloc(co*sizeof(float));
        for(int c = 0; c < co; c++)
        {
            cells[r][c] = 0;
        }
    }
    m.col = co;
    m.row = ro;
    m.matrix = cells;
    return m;
}
// Allocate a ro x co float matrix with every cell set to 1.
// Cleanup: removed the unused `row,col` locals and merged allocation with
// the fill pass.
Matrix ones(int ro,int co)
{
    Matrix m;
    float **cells = (float**)malloc(ro*sizeof(float*));
    for(int r = 0; r < ro; r++)
    {
        cells[r] = (float*)malloc(co*sizeof(float));
        for(int c = 0; c < co; c++)
        {
            cells[r][c] = 1;
        }
    }
    m.col = co;
    m.row = ro;
    m.matrix = cells;
    return m;
}

// Print a string matrix row by row, each cell followed by a comma.
void cout_strmat(str_Matrix mid1)
{
    for(int index_x = 0;index_x<mid1.row;index_x++)
    {
        for(int index_y=0;index_y<mid1.col;index_y++)
        {
            cout<<mid1.str_matrix[index_x][index_y]<<",";
        }
        cout<<endl;
    }
}
// Set cell (index_x, index_y) to value. The Matrix is passed by value but
// the float** buffer is shared, so the caller's matrix is updated too.
// (Parameter renamed from `Matrix` — it previously shadowed the type name.)
int change_va(Matrix m ,int index_x,int index_y,float value)
{
    m.matrix[index_x][index_y] = value;
    return 0;
}

#endif

+ 442
- 0
matrix/matrix_pro.h View File

@@ -0,0 +1,442 @@
/*
███████╗██████╗ ██████╗ ███████╗ ███████╗███╗ ██╗ ██████╗ ██╗███╗ ██╗███████╗
██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝████╗ ██║██╔════╝ ██║████╗ ██║██╔════╝
█████╗ ██║ ██║██║ ███╗█████╗ █████╗ ██╔██╗ ██║██║ ███╗██║██╔██╗ ██║█████╗
██╔══╝ ██║ ██║██║ ██║██╔══╝ ██╔══╝ ██║╚██╗██║██║ ██║██║██║╚██╗██║██╔══╝
███████╗██████╔╝╚██████╔╝███████╗ ███████╗██║ ╚████║╚██████╔╝██║██║ ╚████║███████╗
╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝╚═╝ ╚═══╝╚══════╝
Author:Edge
Web:likedge.top
Date:20200925

matrix add
flag stand use binary or not default 1

move_ele change the x,y to y,x

get_T :get the matrix transpose
input:Matrix a;
output:Matrix a.transpose

and -- just test
---------------------------------------------------------------
if you have the better answer on it , it is nothing, just test~
---------------------------------------------------------------

*/
#ifndef MATRIX_PRO
#define MATRIX_PRO
#include<iostream>
#include<stdlib.h>
#include<string>
#define random(x) (rand()%x)

// Swap two ints through a temporary.
// Fixed: the XOR-chain `ele1 ^= ele2 ^= ele1 ^= ele2` zeroes both values
// when ele1 and ele2 alias the same object, and its multiple unsequenced
// modifications were UB before C++17.
void move_ele(int &ele1, int &ele2)
{
    int tmp = ele1;
    ele1 = ele2;
    ele2 = tmp;
}

// Add two ints using only bitwise ops: XOR yields the partial sum,
// (a & b) << 1 the carries; iterate until no carry remains.
int add_by(int a,int b)
{
    while(b != 0)
    {
        int carry = (a & b) << 1;
        a = a ^ b;
        b = carry;
    }
    return a;
}

// Element-wise sum of two same-shape matrices, written back into mid1
// (shared float** buffer, so this is in-place for the caller).
// flag==1 (the default) routes each pair of cells through the integer
// bit-trick adder add_by, which TRUNCATES the float operands to int.
// NOTE(review): callers needing real float addition must pass flag=0;
// the default silently drops fractional parts. Shape mismatch returns
// mid1 unchanged.
Matrix add(Matrix mid1,Matrix mid2,int flag=1)
{
    if(mid1.row == mid2.row&&mid1.col == mid2.col)
    {
        for(int x = 0;x<mid1.row;x++)
        {
            for(int y = 0;y<mid1.col;y++)
            {
                if(flag==1)
                    mid1.matrix[x][y]=add_by(mid1.matrix[x][y],mid2.matrix[x][y]);
                else
                    mid1.matrix[x][y]+=mid2.matrix[x][y];
            }
        }
    }
    else{return mid1;}
    return mid1;
}

// Element-wise difference mid1 - mid2, returned as a new matrix.
// Fixed: the result was allocated as (mid1.row x mid2.col), the wrong shape
// whenever the column counts differ; it now matches mid1, which is the
// shape actually iterated over.
Matrix subtract(Matrix mid1,Matrix mid2)
{
    Matrix result_subtract = CreateMatrix(mid1.row,mid1.col);
    for(int i=0; i<mid1.row; i++)
    {
        for(int j=0; j<mid1.col; j++)
        {
            result_subtract.matrix[i][j] = mid1.matrix[i][j]-mid2.matrix[i][j];
        }
    }
    return result_subtract;
}

// Classic triple-loop matrix product: (m x k) * (k x n) -> (m x n).
Matrix mul(Matrix mid1,Matrix mid2)
{
    Matrix product = CreateMatrix(mid1.row,mid2.col);
    for(int r = 0; r < mid1.row; r++)
    {
        for(int c = 0; c < mid2.col; c++)
        {
            float acc = 0;
            for(int k = 0; k < mid1.col; k++)
            {
                acc += mid1.matrix[r][k]*mid2.matrix[k][c];
            }
            product.matrix[r][c] = acc;
        }
    }
    return product;
}
// Scale every cell of mid1 by `times`, IN PLACE: the Matrix is passed by
// value but its float** buffer is shared, so the caller's matrix is
// modified too. Returns the same (mutated) matrix for chaining.
Matrix times_mat(double times,Matrix mid1)
{
    for(int index_x=0; index_x<mid1.row; index_x++)
    {
        for(int index_y=0; index_y<mid1.col; index_y++)
        {
            mid1.matrix[index_x][index_y] = mid1.matrix[index_x][index_y]*times;
        }
    }
    return mid1;
}


// Transpose: cell (r, c) of the input becomes cell (c, r) of a new matrix.
Matrix get_T(Matrix mid1)
{
    Matrix trans = CreateMatrix(mid1.col,mid1.row);
    for(int r = 0; r < mid1.row; r++)
    {
        for(int c = 0; c < mid1.col; c++)
        {
            trans.matrix[c][r] = mid1.matrix[r][c];
        }
    }
    return trans;
}


// Row-major copy of the matrix into a newly malloc'd buffer of row*col
// doubles; the caller owns (and should free) the result.
// Fixed: the original returned a pointer to a function-local array, which
// dangled as soon as the function returned (undefined behaviour).
double* flatten(Matrix mid1)
{
    int size = mid1.row*mid1.col;
    double *p = (double*)malloc(size*sizeof(double));
    for ( int i = 0; i <size; i++ )
    {
        p[i] = mid1.matrix[i/mid1.col][i%mid1.col];
    }
    return p;
}


// Reshape mid1 to rs_row x rs_col, filling the result row-major from mid1's
// cells read row-major. Copies at most min(row*col, rs_row*rs_col) cells so
// neither side is overrun (the original wrote source-size elements into the
// result regardless of its capacity). Also drops the unused staging buffer
// and the unused `added` variable.
Matrix matrix_rs(Matrix mid1,int rs_row,int rs_col)
{
    Matrix result = CreateMatrix(rs_row,rs_col);
    int limit = mid1.row*mid1.col;
    int capacity = rs_row*rs_col;
    if(capacity < limit)
    {
        limit = capacity;
    }
    for(int i = 0;i<limit;i++)
    {
        result.matrix[i/rs_col][i%rs_col] = mid1.matrix[i/mid1.col][i%mid1.col];
    }
    return result;
}


// Sum of every cell in the matrix.
double matrix_sum(Matrix mid1)
{
    double total = 0.0;
    for(int r = 0; r < mid1.row; r++)
    {
        for(int c = 0; c < mid1.col; c++)
        {
            total += mid1.matrix[r][c];
        }
    }
    return total;
}
// Arithmetic mean over all row*col cells.
double matrix_mean(Matrix mid1)
{
    double count = mid1.row*mid1.col;
    return matrix_sum(mid1)/count;
}
// Concatenate mid2 onto mid1: axis==1 appends mid2's columns to the right
// (row counts assumed equal); any other axis appends mid2's rows below
// (column counts assumed equal).
// NOTE(review): the row-wise branch allocates new_row+1 rows, leaving one
// extra zeroed row at the bottom — looks like an off-by-one; confirm intent.
Matrix mat_apply(Matrix mid1,Matrix mid2,int axis = 0)
{
    int new_row = mid1.row+mid2.row;
    int new_col = mid1.col+mid2.col;
    Matrix result;
    if(axis ==1)
    {result = CreateMatrix(mid1.row,new_col);}
    else{result = CreateMatrix(new_row+1,mid2.col);}
    // copy mid1 into the top-left corner
    for(int index_x = 0;index_x<mid1.row;++index_x)
    {
        for(int index_y=0;index_y<mid1.col;++index_y)
        {
            result.matrix[index_x][index_y] = mid1.matrix[index_x][index_y];
        }
    }
    if(axis == 1)
    {
        // copy mid2 into the columns to the right of mid1
        for(int index_row=0;index_row<mid2.row;++index_row)
        {
            for(int index_col = mid1.col;index_col<new_col;++index_col)
            {
                result.matrix[index_row][index_col]= mid2.matrix[index_row][mid2.col-new_col+index_col];
            }
        }
    }
    else
    {
        // copy mid2 into the rows below mid1
        for(int index_row=mid1.row;index_row<new_row;++index_row)
        {
            for(int index_col = 0;index_col<mid2.col;++index_col)
            {
                result.matrix[index_row][index_col]= mid2.matrix[index_row-mid2.row][index_col];
            }
        }
    }
    return result;
}
// First rows of a matrix (pandas-style head, up to 6 rows).
// Fixed: the row count is clamped to mid1.row, so matrices with fewer than
// 6 rows no longer read out of bounds; behaviour is unchanged for taller
// matrices.
Matrix head(Matrix mid1)
{
    int col = mid1.col;
    int rows = mid1.row < 6 ? mid1.row : 6;
    Matrix mid_return = CreateMatrix(rows,col);
    for(int index_x = 0;index_x<rows;++index_x)
    {
        for(int index_y=0;index_y<col;++index_y)
        {
            mid_return.matrix[index_x][index_y] = mid1.matrix[index_x][index_y];
        }
    }
    return mid_return;
}
// Print the matrix row by row, each value followed by a comma.
void cout_mat(Matrix mid1)
{
    for(int r = 0; r < mid1.row; r++)
    {
        for(int c = 0; c < mid1.col; c++)
        {
            cout<<mid1.matrix[r][c]<<",";
        }
        cout<<endl;
    }
}
// Slice rows [start_x, end_x) and columns [start_y, end_y); 0 for an end
// bound means "to the end of that dimension".
// Fixed: the bounds were resolved with `if / else if`, so when both end
// bounds were 0 (or end_y was nonzero with end_x zero) end_x was never
// expanded, producing an empty or negative-sized slice. The two sentinels
// are now handled independently.
Matrix iloc(Matrix mid1,int start_x=0,int end_x=0,int start_y=0,int end_y=0)
{
    if(end_y == 0)
    {
        end_y = mid1.col;
    }
    if(end_x == 0)
    {
        end_x = mid1.row;
    }
    int new_row = end_x-start_x;
    int new_col = end_y-start_y;
    Matrix mid_return = CreateMatrix(new_row,new_col);
    for(int index_x = start_x;index_x<end_x;++index_x)
    {
        for(int index_y=start_y;index_y<end_y;++index_y)
        {
            mid_return.matrix[index_x-start_x][index_y-start_y] = mid1.matrix[index_x][index_y];
        }
    }
    return mid_return;
}
// Element-wise (Hadamard) product; shapes must match exactly, otherwise an
// error is printed and mid1 is returned unchanged.
Matrix mul_simple(Matrix mid1,Matrix mid2)
{
    if(mid1.row != mid2.row||mid1.col != mid2.col)
    {
        cout<<"Error: shape A&B"<<endl;
        return mid1;
    }
    Matrix prod = CreateMatrix(mid1.row,mid1.col);
    for(int r = 0; r < mid1.row; r++)
    {
        for(int c = 0; c < mid1.col; c++)
        {
            prod.matrix[r][c] = mid1.matrix[r][c]*mid2.matrix[r][c];
        }
    }
    return prod;
}
// x_dim x y_dim matrix of pseudo-random values in [-1, 2) with 1e-4
// granularity: (rand()%30000)*0.0001 - 1. Uses the global rand() state and
// does no seeding, so sequences repeat unless the caller calls srand().
// Cells are written through change_va (shared buffer, so `result` is filled).
Matrix CreateRandMat(int x_dim,int y_dim)
{
    int index_x,index_y;
    Matrix result = CreateMatrix(x_dim,y_dim);
    for(index_x = 0;index_x<x_dim;++index_x)
    {
        for(index_y=0;index_y<y_dim;++index_y)
        {
            double temp_val = (rand()%30000)*0.0001-1;
            change_va(result,index_x,index_y,temp_val);
        }
    }
    return result;
}
// Scalar ReLU: max(val_relu, 0).
double edge_relu(double val_relu)
{
    return val_relu > 0 ? val_relu : 0;
}
// Element-wise ReLU into a fresh matrix.
// Fixed: the inner loop was bounded by mid1.row instead of mid1.col, so
// non-square matrices were truncated or read/written out of bounds. Also
// straightened out the stray brace nesting.
Matrix mat_relu(Matrix mid1)
{
    Matrix relu_mat = CreateMatrix(mid1.row,mid1.col);
    for(int index_x = 0;index_x<mid1.row;++index_x)
    {
        for(int index_y = 0;index_y<mid1.col;++index_y)
        {
            relu_mat.matrix[index_x][index_y] = edge_relu(mid1.matrix[index_x][index_y]);
        }
    }
    return relu_mat;
}
// Column vector of squared differences between the FIRST columns of the two
// inputs: result[i][0] = (mid1[i][0] - mid2[i][0])^2.
Matrix mat_sq_loss(Matrix mid1,Matrix mid2)
{
    Matrix sq = CreateMatrix(mid1.row,1);
    for(int r = 0; r < mid1.row; ++r)
    {
        sq.matrix[r][0] = pow(mid1.matrix[r][0]-mid2.matrix[r][0],2);
    }
    return sq;
}
// Despite the name, this broadcasts COLUMN 0 of mid1 into a shape1 x shape2
// matrix: every output column is a copy of mid1's first column. It is not
// zero-padding. Assumes mid1 has at least shape1 rows.
Matrix padding(Matrix mid1,int shape1,int shape2)
{
    Matrix result = CreateMatrix(shape1,shape2);
    for(int index_x = 0;index_x<shape1;index_x++)
    {
        for(int index_y = 0;index_y<shape2;index_y++)
        {
            result.matrix[index_x][index_y] = mid1.matrix[index_x][0];
        }
    }
    return result;
}
// Copy row `index` of mid1 into a fresh 1 x col matrix.
Matrix get_row(Matrix mid1,int index)
{
    Matrix row_mat = CreateMatrix(1,mid1.col);
    for(int c = 0; c < mid1.col; c++)
    {
        row_mat.matrix[0][c] = mid1.matrix[index][c];
    }
    return row_mat;
}
// Single-channel "valid" convolution of mid1 with `kernel`; each output
// cell is the sum of the element-wise product of the kernel with a crop of
// the input. Output is ((row-kernel_size)/stride+1) x ((col-kernel_size)/stride+1).
// NOTE(review): the crop uses kernel.col for the x-extent and kernel.row for
// the y-extent (swapped for non-square kernels), and for stride>1 the loop
// variables serve as both input offset and output index — verify before
// using stride != 1 or non-square kernels.
Matrix conv_element(Matrix mid1,Matrix kernel,int kernel_size = 2,int stride = 1)
{
    Matrix conv_result = CreateMatrix(((mid1.row-kernel_size)/stride)+1,((mid1.col-kernel_size)/stride)+1);
    for(int x_ = 0;x_<=(mid1.row-kernel_size)/stride;x_+=stride)
    {
        for(int y_ = 0;y_<=(mid1.col-kernel_size)/stride;y_+=stride)
        {
            Matrix crop_pic = iloc(mid1,x_,x_+kernel.col,y_,y_+kernel.row);
            change_va(conv_result,x_,y_,matrix_sum(mul_simple(crop_pic,kernel)));
        }
    }
    return conv_result;
}
/*
parameter:
Matrix mid1,
int input_dim = 3
int output_channels = 3
int stride = 1
int kernel_size = 2
int mode = 0
int padding = 0
*/
// Toy forward-convolution demo. Only mid1's spatial dimensions are used;
// the input channels themselves are generated randomly. With mode == 0 it
// prints the channels, the all-ones kernels, and each accumulated feature
// map, then returns 0.0.
// Fixes: (1) the filter array was declared [output_channels][input_dim] but
// indexed [channel][filter] everywhere — out of bounds whenever the two
// counts differ; the declaration now matches the indexing. (2) the
// mode != 0 path fell off the end of a double-returning function (UB); it
// now returns 0.0. Also dropped an unused conv_result temporary.
double conv_test(Matrix mid1,int input_dim = 3,int output_channels = 3,int stride = 1,int kernel_size = 2,int mode = 0,int padding = 0)
{
    // one random matrix per input channel, same spatial size as mid1
    Matrix mid_rgb[input_dim];
    for(int rgb_idx = 0;rgb_idx<input_dim;rgb_idx++)
    {
        mid_rgb[rgb_idx] = CreateRandMat(mid1.row,mid1.col);
    }
    // filters[channel][filter], matching how they are indexed below
    Matrix filters[input_dim][output_channels];
    for(int channel_index = 0;channel_index<input_dim;channel_index++)
    {
        for(int filter_index = 0;filter_index<output_channels;filter_index++)
        {
            Matrix kernel = ones(kernel_size,kernel_size);
            filters[channel_index][filter_index] = kernel;
        }
    }
    if(mode == 0)
    {
        cout<<"input_img:"<<endl;
        for(int i =0;i<input_dim;i++)
        {
            cout<<"---------rgb: "<<i<<"---------"<<endl;
            cout_mat(mid_rgb[i]);
        }
        Matrix kernel = ones(kernel_size,kernel_size);
        cout<<"--------- kernels: 3x3--------"<<endl;
        cout_mat(kernel);
        cout<<"--------- output: ---------"<<endl;
        Matrix feature_maps[output_channels];
        for(int filter_idx = 0;filter_idx<output_channels;filter_idx++)
        {
            // accumulate the per-channel convolutions for this filter
            Matrix sum_rgb = CreateMatrix(((mid1.row-kernel_size)/stride)+1,((mid1.col-kernel_size)/stride)+1);
            for(int channel_idx=0;channel_idx<input_dim;channel_idx++)
            {
                sum_rgb = add(sum_rgb,conv_element(mid_rgb[channel_idx],filters[channel_idx][filter_idx],kernel_size,stride),0);
                cout<<"sum_rgb"<<"filters_index: "<<filter_idx<<" "<<endl;
                cout_mat(sum_rgb);
            }
            feature_maps[filter_idx]=sum_rgb;
        }
        for(int i = 0;i < output_channels;i++)
        {
            cout<<"==========filter: "<<i<<"========="<<endl;
            cout_mat(feature_maps[i]);
        }
        return 0.0;
    }
    // no other mode is implemented; return instead of falling off the end
    return 0.0;
}
#endif

+ 1
- 0
mytest.csv
File diff suppressed because it is too large
View File


+ 2
- 0
mytest.txt
File diff suppressed because it is too large
View File


+ 58
- 0
neral/test.py View File

@@ -0,0 +1,58 @@
class Tensor:
    """Minimal scalar autograd node: holds a value, its parents, and a grad."""

    def __init__(self, data, depend=None):
        # depend: list of (parent_tensor, grad_fn) pairs. A fresh list is
        # created per instance — the original `depend=[]` mutable default
        # would have shared one list across all leaf tensors.
        self.data = data
        self.depend = depend if depend is not None else []
        self.grad = 0

    def __mul__(self, data):
        """Multiplication: d(xy)/dx = y and d(xy)/dy = x."""
        def grad_fn1(grad):
            return grad * data.data
        def grad_fn2(grad):
            return grad * self.data
        depend = [(self, grad_fn1), (data, grad_fn2)]
        return Tensor(self.data * data.data, depend)

    def __rmul__(self, data):
        def grad_fn1(grad):
            return grad * data.data
        def grad_fn2(grad):
            return grad * self.data
        depend = [(self, grad_fn1), (data, grad_fn2)]
        return Tensor(self.data * data.data, depend)

    def __add__(self, data):
        """Addition: the gradient passes through unchanged to both operands.

        Fixed: the value was previously computed as self.data * data.data
        (a multiply), which contradicted the pass-through gradient.
        """
        def grad_fn(grad):
            return grad
        depend = [(self, grad_fn), (data, grad_fn)]
        return Tensor(self.data + data.data, depend)

    def __radd__(self, data):
        # Fixed: same multiply-instead-of-add bug as __add__.
        def grad_fn(grad):
            return grad
        depend = [(self, grad_fn), (data, grad_fn)]
        return Tensor(self.data + data.data, depend)

    def __repr__(self):
        return f"Tensor:{self.data}"

    def backward(self, grad=None):
        """Reverse-mode pass: accumulate this node's grad, then recurse."""
        if grad is None:
            # root of the backward pass: seed with dL/dL = 1
            self.grad = 1
        else:
            # accumulate contributions from every branch of the graph
            self.grad += grad
        # chain rule: each parent receives grad_fn(total grad so far)
        for tensor, grad_fn in self.depend:
            bw = grad_fn(self.grad)
            tensor.backward(bw)
# Demo: build f = x*x and g = x*x as two branches, combine them with the
# (overloaded) + operator, then backprop from y and inspect the grads.
x = Tensor(4)
f = x * x
g = x * x
y = f + g
y.backward()
print(x)
print(y, g.grad, x.grad)

+ 91
- 0
nerual_test.cpp View File

@@ -0,0 +1,91 @@
#include<iostream>
#include<time.h>
#include<string>
#include<math.h>
#include<fstream>
#include"./autodiff/node.h"
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
#include"./grad_edge/matrix_grad.h"
using namespace std;
// Autodiff seed nodes used by the loss/activation definitions below.
Node z = 1;
Node t1 = 1,a13 = 1;
// Squared-error loss: 0.5*(t1 - a13)^2.
Node loss_act(Node t1,Node a13)
{
    Node diff = t1 - a13;
    return 0.5*pow(diff,2);
}
// Sigmoid 1/(1+e^-z), with e^-z formed as 1/exp(z).
Node sigmoid_act(Node z)
{
    Node inv_exp = 1/exp(z);
    return 1/(1+inv_exp);
}
// Loss/activation hooks handed to edge_network's backward routines.
Node (*loss)(Node,Node) = loss_act;
Node (*act)(Node) = sigmoid_act;
int main()
{
    cout<<"begin to train"<<endl;
    int input_dim = 2;
    int output_dim = 2;
    edge_network sequaltial(input_dim,output_dim);
    // 100 samples: zero-initialised 100x3 data, random 100x1 labels.
    Matrix data_mine = CreateMatrix(100,3);
    Matrix label_mine = CreateRandMat(100,1);
    Matrix data = iloc(data_mine,0,1,0,0);
    cout<<"data: ";
    cout_mat(data);
    Matrix label = iloc(label_mine,0,3,0,0);
    cout<<"label: ";
    cout_mat(label);
    // Layer-1 parameters.
    Matrix bias1 = CreateRandMat(2,1);
    Matrix weight1 = CreateRandMat(2,data.col);
    cout<<"weight1: ";
    cout_mat(weight1);
    int index = 1;
    // Running backward signals accumulated across iterations.
    Matrix backward2 = CreateMatrix(2,1);
    Matrix before_backward2 = CreateMatrix(2,1);
    change_va(before_backward2,1,0,1);
    // NOTE(review): weight2/weight3 and their biases are re-randomised every
    // iteration inside this loop; only weight1/bias1 persist — confirm this
    // is intended.
    for(index = 0;index<99;index++){
        Matrix output1 = sequaltial.forward(get_T(get_row(data_mine,index)),weight1,bias1);
        Matrix output1_without_act = sequaltial.forward_without_act(get_T(get_row(data_mine,index)),weight1,bias1);
        cout<<"output1_without_act: ";
        cout_mat(output1_without_act);
        Matrix weight2 = CreateRandMat(2,output1.row);
        cout<<"weight2: "<<endl;
        cout_mat(weight2);
        Matrix bias2 = CreateRandMat(weight2.row,1);
        Matrix output2 = sequaltial.forward(output1,weight2,bias2);
        Matrix output2_without_act = sequaltial.forward_without_act(output1,weight2,bias2);
        cout<<"output2_without_act: ";
        cout_mat(output2_without_act);
        cout<<"output2: ";
        cout_mat(output2);
        cout<<"output1:"<<endl;
        cout_mat(output1);
        Matrix weight3 = CreateRandMat(3,output2.row);
        Matrix bias3 = CreateRandMat(weight3.row,1);
        Matrix output3 = sequaltial.forward(output2,weight3,bias3);
        Matrix output3_without_act = sequaltial.forward_without_act(output2,weight3,bias3);
        cout<<"row: "<<label.row<<"col: "<<label.col;
        // Backward pass: output-layer delta, then propagate through layers.
        Matrix output_end = sequaltial.end_layer_backward(label,output3_without_act,*loss,*act);
        cout<<"outputend: ";
        cout_mat(output_end);
        cout<<"output3_without_act: ";
        cout_mat(output3_without_act);
        cout<<"output3: ";
        cout_mat(output3);
        Matrix backward3 = sequaltial.backward(output_end,output2_without_act,get_T(weight3),*act);
        cout<<"backward_before: ";
        cout_mat(before_backward2);
        before_backward2 = add(before_backward2,backward2);
        backward2 = sequaltial.backward(backward3,output1_without_act,get_T(weight2),*act);
        cout<<"backward2: ";
        save_txt(backward2,"a.csv");
        cout_mat(before_backward2);
        cout_mat(backward2);
        //can change the before_backward2 values;
    }
}


BIN
pics/image-20200418210521131.png View File

Before After
Width: 1190  |  Height: 200  |  Size: 7.2 kB

+ 1
- 0
picture/00.svg
File diff suppressed because it is too large
View File


+ 1
- 0
picture/01.svg
File diff suppressed because it is too large
View File


+ 1
- 0
picture/02.svg View File

@@ -0,0 +1 @@
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1574235895282" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="2501" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200"><defs><style type="text/css"></style></defs><path d="M318.272 570.624c-58.784 69.584-79.904 146.368-46.272 180 33.632 33.632 110.416 12.512 180-46.272" fill="#FF5252" p-id="2502"></path><path d="M316.4 774.464c-20.448 0-37.776-5.904-50.048-18.192-36.256-36.256-16.976-116.496 45.824-190.816a8 8 0 1 1 12.224 10.304c-56.368 66.72-76.464 139.456-46.72 169.184 29.696 29.728 102.448 9.632 169.184-46.72a8 8 0 1 1 10.336 12.224c-49.168 41.536-100.896 64.016-140.8 64.016z" fill="#263238" p-id="2503"></path><path d="M605.632 190.832L338.032 491.872a40 40 0 0 0 1.616 54.864l138.992 138.992a40 40 0 0 0 54.864 1.616l301.04-267.6a239.904 239.904 0 0 0 80.48-173.792l3.232-138.832-138.832 3.232a239.888 239.888 0 0 0-173.792 80.48z" fill="#40C4FF" p-id="2504"></path><path d="M506.944 705.424c-12.336 0-24.64-4.704-33.968-14.048l-138.992-138.992a48.048 48.048 0 0 1-1.936-65.824l267.6-301.04a248.752 248.752 0 0 1 179.584-83.168l138.832-3.232a8.032 8.032 0 0 1 8.176 8.176l-3.232 138.832a248.96 248.96 0 0 1-83.168 179.616L538.8 693.328a47.888 47.888 0 0 1-31.856 12.096z m98.688-514.592l5.984 5.312L344 497.184a32.032 32.032 0 0 0 1.296 43.888l138.992 138.992a32.032 32.032 0 0 0 43.888 1.296L829.232 413.76a232.72 232.72 0 0 0 77.792-168.016l3.04-130.448-130.448 3.04a232.72 232.72 0 0 0-168 77.792l-5.984-5.296z" fill="#263238" p-id="2505"></path><path d="M802.656 391.872a8 8 0 0 1-5.312-13.984 184.528 184.528 0 0 0 61.696-133.248l2.064-88.56a7.968 7.968 0 0 1 8.192-7.808 8 8 0 0 1 7.808 8.192l-2.064 88.56a200.72 200.72 0 0 1-67.072 144.848 8.112 8.112 0 0 1-5.312 2zM358.016 429.216a7.84 7.84 0 0 1-4.256-1.232 390.112 390.112 0 0 0-67.68-34.112l-67.856-26.112a8 8 0 0 1 
0.208-15.024l74.576-26.32c47.664-16.816 99.072-18.368 148.624-4.56a8 8 0 0 1-4.304 15.408c-46.4-12.96-94.448-11.472-138.992 4.256l-54.112 19.088 47.6 18.32c24 9.216 47.712 21.168 70.464 35.52a8 8 0 0 1-4.272 14.768zM662.336 809.536a8 8 0 0 1-7.472-5.12l-26.096-67.84a391.952 391.952 0 0 0-32.064-64.464 8 8 0 0 1 13.632-8.384 408.656 408.656 0 0 1 33.376 67.088l18.304 47.6 19.088-54.096c15.264-43.232 17.136-89.936 5.44-135.056a8 8 0 0 1 15.472-4.032c12.512 48.24 10.496 98.176-5.824 144.4l-26.32 74.56a8 8 0 0 1-7.44 5.344h-0.096z" fill="#263238" p-id="2506"></path><path d="M634.029131 388.590216a80 80 0 1 0 113.13511-113.139059 80 80 0 1 0-113.13511 113.139059Z" fill="#FFFFFF" p-id="2507"></path><path d="M690.608 419.984a87.76 87.76 0 0 1-62.224-25.728 87.424 87.424 0 0 1-25.776-62.24c0-23.504 9.152-45.616 25.776-62.224a87.424 87.424 0 0 1 62.224-25.776c23.504 0 45.616 9.152 62.224 25.776a87.392 87.392 0 0 1 25.776 62.224c0 23.52-9.152 45.616-25.776 62.24a87.712 87.712 0 0 1-62.224 25.728z m0-159.968a71.52 71.52 0 0 0-50.912 21.088c-13.6 13.6-21.088 31.664-21.088 50.912s7.488 37.312 21.088 50.928a72.128 72.128 0 0 0 101.824 0c13.6-13.616 21.088-31.68 21.088-50.928s-7.488-37.312-21.088-50.912a71.52 71.52 0 0 0-50.912-21.088z" fill="#263238" p-id="2508"></path><path d="M341.376 593.712c-33.984 44.048-45.184 90.576-24.112 111.648 21.072 21.072 67.616 9.872 111.648-24.112" fill="#FFD740" p-id="2509"></path><path d="M347.056 724.144c-14.4 0-26.64-4.304-35.456-13.104-23.92-23.92-14.496-73.024 23.44-122.208a8 8 0 0 1 12.672 9.776c-31.776 41.168-42.208 83.696-24.8 101.104 17.392 17.392 59.936 6.976 101.104-24.784a8 8 0 1 1 9.792 12.656c-31.056 23.968-62.08 36.56-86.752 36.56zM105.744 653.36a8 8 0 0 1-5.664-13.648l11.312-11.312a8 8 0 0 1 11.312 11.312l-11.312 11.312a7.968 7.968 0 0 1-5.648 2.336zM139.68 619.424a8 8 0 0 1-5.664-13.648l124.448-124.448a8 8 0 0 1 11.312 11.312l-124.448 124.448a7.968 7.968 0 0 1-5.648 2.336zM377.28 924.896a8 8 0 0 1-5.664-13.648l11.312-11.312a8 8 0 
0 1 11.312 11.312l-11.312 11.312a8 8 0 0 1-5.648 2.336zM411.216 890.96a8 8 0 0 1-5.664-13.648l124.448-124.448a8 8 0 0 1 11.312 11.312l-124.448 124.448a7.968 7.968 0 0 1-5.648 2.336z" fill="#263238" p-id="2510"></path><path d="M240 120h-64a8 8 0 0 1 0-16h64a8 8 0 0 1 0 16z" fill="#263238" p-id="2511"></path><path d="M208 152a8 8 0 0 1-8-8V80a8 8 0 0 1 16 0v64a8 8 0 0 1-8 8z" fill="#263238" p-id="2512"></path><path d="M128 224H64a8 8 0 0 1 0-16h64a8 8 0 0 1 0 16z" fill="#263238" p-id="2513"></path><path d="M96 256a8 8 0 0 1-8-8v-64a8 8 0 0 1 16 0v64a8 8 0 0 1-8 8z" fill="#263238" p-id="2514"></path></svg>

BIN
picture/WX20191119-105411@2x.png View File

Before After
Width: 1144  |  Height: 888  |  Size: 380 kB

BIN
picture/WX20191119-125244@2x.png View File

Before After
Width: 1170  |  Height: 934  |  Size: 166 kB

BIN
picture/apply_axis_0.png View File

Before After
Width: 254  |  Height: 340  |  Size: 19 kB

BIN
picture/apply_axis_1.png View File

Before After
Width: 496  |  Height: 172  |  Size: 14 kB

BIN
picture/autograd.jpg View File

Before After
Width: 2624  |  Height: 1006  |  Size: 248 kB

+ 1
- 0
picture/cpu.svg
File diff suppressed because it is too large
View File


+ 1
- 0
picture/jabber.svg View File

@@ -0,0 +1 @@
<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>Jabber icon</title><path d="M9.597 11.737c0-.35-.065-.732-.268-1.025-.408-.588-1.283-.775-1.892-.405-.308.188-.48.515-.576.851-.191.668-.104 1.43.03 2.1.043.214.088.428.148.639.021.076.031.186.08.25.087.11.297.141.426.12.387-.065.291-.703.278-.974-.03-.634-.218-1.25-.036-1.881.076-.268.225-.568.494-.684.244-.105.49.023.586.261.156.385.117.83.215 1.23.033.137.07.272.131.399.018.037.043.113.094.108.126-.011.304-.22.398-.298.304-.25.616-.52.965-.705.165-.088.435-.23.603-.08a.612.612 0 0 1 .108.13c.198.31.002.55-.127.845-.166.38-.336.758-.577 1.098-.207.293-.49.549-.655.869-.107.205-.167.43-.123.663.036.188.181.301.373.257.143-.033.24-.156.322-.269.146-.202.281-.412.426-.615.28-.393.61-.76.846-1.183a3.41 3.41 0 0 0 .42-1.664c0-.474-.171-1.198-.723-1.298a.974.974 0 0 0-.326.01 1.432 1.432 0 0 0-.374.12 2.715 2.715 0 0 0-.818.637c-.146.16-.276.363-.449.495M9.078.016c-.435.058-.878.052-1.315.12-.838.129-1.64.389-2.425.703-.286.114-.568.241-.845.376-.103.05-.26.09-.343.17-.043.041-.039.139-.044.195-.014.156-.034.313-.05.47-.058.605-.1 1.229-.013 1.834.028.195.09.55.33.587.369.058.656-.397.837-.648.424-.586.905-1.132 1.6-1.394.817-.308 1.753-.381 2.618-.44 2.426-.167 5.078.277 6.865 2.064.254.254.495.524.7.82.8 1.159 1.223 2.477 1.427 3.86.096.65.161 1.308.013 1.955-.257 1.122-.932 2.1-1.706 2.931-.53.57-1.128 1.084-1.749 1.552-.347.261-.736.483-1.062.768-.375.329-.688.74-.925 1.179-.639 1.181-.81 2.602-.622 3.92.038.27.073.542.134.809.018.08.022.217.073.282.097.122.36.189.508.196.154.007.256-.11.294-.249.064-.236.026-.498-.012-.736-.076-.487-.147-.977-.125-1.471a3.71 3.71 0 0 1 1.026-2.425c.643-.673 1.512-1.061 2.243-1.625 1.474-1.136 2.794-2.668 3.301-4.492a5.194 5.194 0 0 0 .159-2.015c-.105-.849-.415-1.697-.708-2.497-.892-2.437-2.422-4.755-4.851-5.87-.964-.443-1.973-.645-3.016-.79-.49-.068-.98-.11-1.472-.132-.274-.012-.572-.042-.845-.006M5.277 
15.796c-.473.068-.61.447-.523.876.112.548.543.965.97 1.295a6.03 6.03 0 0 0 3.884 1.238c.538-.023 1.124-.112 1.617-.34.265-.122.542-.563.181-.751a.59.59 0 0 0-.169-.051c-.157-.026-.333.041-.482.084-.263.075-.526.153-.797.196-.808.13-1.683-.055-2.352-.534-.542-.387-.98-.898-1.393-1.415-.253-.316-.482-.663-.936-.598M4.662 18.474c-.12.016-.259.011-.362.087-.215.158.022.476.135.62.328.417.76.763 1.192 1.068a7.832 7.832 0 0 0 4.03 1.442c.421.03.85 0 1.267-.07.152-.026.342-.037.482-.103.399-.186.284-.939-.072-1.106-.155-.073-.404.023-.567.046-.385.054-.771.06-1.158.05-1.015-.025-2.096-.338-2.98-.831a5.589 5.589 0 0 1-.966-.693c-.181-.16-.368-.42-.603-.502-.11-.037-.284-.023-.398-.008M4.903 20.73a.638.638 0 0 0-.413.236c-.078.088-.152.167-.197.278-.246.609.41 1.183.864 1.47.504.32 1.055.558 1.616.758 1.266.45 2.752.739 4.066.336.391-.12.778-.338 1.062-.634.16-.167.27-.419-.024-.526-.174-.063-.385.098-.543.162a4.57 4.57 0 0 1-1.158.312c-.527.064-1.001-.052-1.508-.179-.434-.108-.872-.217-1.291-.373a4.457 4.457 0 0 1-1.026-.513c-.094-.066-.206-.125-.282-.211-.25-.282-.439-.612-.707-.88-.116-.116-.281-.256-.459-.236"/></svg>

BIN
picture/logo.png View File

Before After
Width: 1204  |  Height: 1212  |  Size: 144 kB

BIN
picture/logo2.png View File

Before After
Width: 1948  |  Height: 620  |  Size: 58 kB

BIN
picture/nerual_test1.png View File

Before After
Width: 1686  |  Height: 574  |  Size: 983 kB

BIN
picture/path.png View File

Before After
Width: 2830  |  Height: 1840  |  Size: 384 kB

+ 1
- 0
picture/processwire (1).svg View File

@@ -0,0 +1 @@
<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>ProcessWire icon</title><path d="M21.939 5.27C21.211 4.183 20 2.941 18.784 2.137 16.258.407 13.332-.207 10.744.061c-2.699.291-5.01 1.308-6.91 3.004C2.074 4.637.912 6.559.4 8.392c-.518 1.833-.449 3.53-.264 4.808.195 1.297.841 2.929.841 2.929.132.313.315.44.41.493.472.258 1.247.031 1.842-.637.03-.041.046-.098.03-.146-.166-.639-.226-1.12-.285-1.492-.135-.736-.195-1.969-.105-3.109.045-.617.165-1.277.375-1.969.406-1.367 1.262-2.794 2.6-3.98 1.441-1.277 3.289-2.066 5.046-2.27.616-.074 1.788-.145 3.199.203.301.075 1.593.412 2.975 1.348 1.006.684 1.816 1.528 2.374 2.363.568.797 1.185 2.141 1.366 3.125.256 1.12.256 2.307.074 3.463-.225 1.158-.631 2.284-1.262 3.275-.435.768-1.337 1.783-2.403 2.545-.961.676-2.058 1.164-3.184 1.434-.57.135-1.142.221-1.728.24-.521.016-1.212 0-1.697-.082-.721-.115-.871-.299-1.036-.549 0 0-.115-.18-.147-.662.011-4.405.009-3.229.009-5.516 0-.646-.021-1.232-.015-1.764.03-.873.104-1.473.728-2.123.451-.479 1.082-.768 1.777-.768.211 0 .938.01 1.577.541.685.572.8 1.354.827 1.563.156 1.223-.652 2.134-.962 2.365-.384.288-.729.428-.962.51-.496.166-1.041.214-1.531.182-.075-.005-.143.044-.158.119l-.165.856c-.161.65.2.888.41.972.671.207 1.266.293 1.971.24 1.081-.076 2.147-.502 3.052-1.346.77-.732 1.209-1.635 1.359-2.645.15-1.121-.045-2.328-.556-3.35-.562-1.127-1.532-2.068-2.81-2.583-1.291-.508-2.318-.526-3.642-.188l-.015.005c-.86.296-1.596.661-2.362 1.452-.525.546-.955 1.207-1.217 1.953-.26.752-.33 1.313-.342 2.185-.016.646.015 1.246.015 1.808v3.701c0 1.184-.04 1.389 0 1.998.022.404.078.861.255 1.352.182.541.564 1.096.826 1.352.367.391.834.705 1.293.9 1.051.467 2.478.541 3.635.496.766-.029 1.536-.135 2.291-.314 1.51-.359 2.96-1.012 4.235-1.918 1.367-.963 2.555-2.277 3.211-3.393.841-1.326 1.385-2.814 1.668-4.343.255-1.532.243-3.103-.099-4.612-.27-1.4-.991-2.936-1.823-4.176l.038.037z"/></svg>

+ 1
- 0
picture/啊.svg View File

@@ -0,0 +1 @@
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1574238116469" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="2236" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200"><defs><style type="text/css"></style></defs><path d="M512 512m-512 0a512 512 0 1 0 1024 0 512 512 0 1 0-1024 0Z" fill="#FEC43C" p-id="2237"></path><path d="M1013.76 408.576C965.632 175.104 759.808 0 512 0 229.376 0 0 229.376 0 512c0 123.904 44.032 236.544 116.736 324.608C203.776 884.736 303.104 911.36 409.6 911.36c301.056 0 550.912-217.088 604.16-502.784z" fill="#FFD73A" p-id="2238"></path><path d="M296.96 337.92m-92.16 0a92.16 92.16 0 1 0 184.32 0 92.16 92.16 0 1 0-184.32 0Z" fill="#873A18" p-id="2239"></path><path d="M727.04 337.92m-92.16 0a92.16 92.16 0 1 0 184.32 0 92.16 92.16 0 1 0-184.32 0Z" fill="#873A18" p-id="2240"></path><path d="M512 849.92c-84.992 0-153.6-68.608-153.6-153.6V583.68c0-84.992 68.608-153.6 153.6-153.6s153.6 68.608 153.6 153.6v112.64c0 84.992-68.608 153.6-153.6 153.6z" fill="#873A18" p-id="2241"></path><path d="M512 849.92c80.896 0 146.432-62.464 152.576-141.312-16.384-24.576-45.056-41.984-77.824-41.984-30.72 0-58.368 15.36-75.776 38.912-16.384-23.552-44.032-38.912-75.776-38.912-32.768 0-60.416 16.384-77.824 41.984C365.568 787.456 431.104 849.92 512 849.92z" fill="#F44444" p-id="2242"></path></svg>

+ 1
- 0
picture/彩虹.svg
File diff suppressed because it is too large
View File


+ 1
- 0
picture/方向.svg View File

@@ -0,0 +1 @@
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1574238017780" class="icon" viewBox="0 0 1211 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4222" xmlns:xlink="http://www.w3.org/1999/xlink" width="236.5234375" height="200"><defs><style type="text/css"></style></defs><path d="M435.2 515.413333c0 131.413333 107.52 238.933333 238.933333 238.933334s238.933333-107.52 238.933334-238.933334-107.52-238.933333-238.933334-238.933333-238.933333 107.52-238.933333 238.933333z" fill="#E9EAEB" p-id="4223"></path><path d="M435.2 600.746667h256v51.2l187.733333-136.533334-187.733333-136.533333v51.2h-256" fill="#A3D4FF" p-id="4224"></path><path d="M674.133333 686.08v-68.266667h-273.066666v-34.133333h307.2v35.84l141.653333-104.106667-141.653333-102.4v34.133334h-273.066667v-34.133334h238.933333v-66.56l233.813334 168.96z" fill="#2A5082" p-id="4225"></path><path d="M605.866667 498.346667h34.133333v34.133333h-34.133333zM537.6 498.346667h34.133333v34.133333h-34.133333zM537.6 327.68h34.133333v34.133333h-34.133333zM537.6 669.013333h34.133333v34.133334h-34.133333z" fill="#2A5082" p-id="4226"></path><path d="M674.133333 952.32c-109.226667 0-213.333333-39.253333-295.253333-112.64l22.186667-25.6c75.093333 66.56 172.373333 104.106667 273.066666 104.106667v34.133333zM1117.866667 508.586667h-34.133334c0-196.266667-139.946667-366.933333-334.506666-402.773334l6.826666-34.133333c209.92 39.253333 361.813333 223.573333 361.813334 436.906667zM899.413333 899.413333l-17.066666-29.013333c81.92-47.786667 146.773333-122.88 180.906666-211.626667l32.426667 11.946667c-37.546667 95.573333-107.52 175.786667-196.266667 228.693333z" fill="#BCC0C4" p-id="4227"></path><path d="M788.48 865.28l-11.946667-32.426667c129.706667-47.786667 216.746667-174.08 216.746667-312.32 0-182.613333-148.48-332.8-332.8-332.8-83.626667 0-163.84 30.72-225.28 88.746667l-23.893333-25.6C479.573333 187.733333 568.32 153.6 660.48 
153.6c201.386667 0 366.933333 163.84 366.933333 366.933333 1.706667 153.6-95.573333 291.84-238.933333 344.746667z" fill="#BCC0C4" p-id="4228"></path><path d="M366.933333 413.013333h34.133334v34.133334h-34.133334zM332.8 447.146667h-40.96l-102.4-102.4H110.933333v-34.133334h92.16l102.4 102.4h27.306667zM110.933333 498.346667h392.533334v34.133333h-392.533334zM186.026667 720.213333H110.933333v-34.133333h61.44l102.4-102.4h126.293334v34.133333h-112.64zM203.093333 873.813333H110.933333v-34.133333h78.506667l170.666667-170.666667h143.36v34.133334h-129.706667zM503.466667 361.813333h-143.36l-136.533334-136.533333H110.933333v-34.133333h126.293334l136.533333 136.533333h129.706667z" fill="#2A5082" p-id="4229"></path></svg>

+ 1
- 0
picture/星月.svg
File diff suppressed because it is too large
View File


+ 1
- 0
picture/火箭.svg View File

@@ -0,0 +1 @@
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1574235993975" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="11946" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200"><defs><style type="text/css"></style></defs><path d="M684.4 416.9s23.5 11.8 32 32.6v128.2c8.7 20.9 102.9 43.3 113 63.6v128.2H641s-69 32.3-132.1 32.3h3.7c-63.1 0-132.1-32.3-132.1-32.3H192V641.3c10-20.3 106.7-42.7 115.4-63.6V449.5c8.6-20.8 28.3-28.8 28.3-28.8" fill="#29A3EC" p-id="11947"></path><path d="M512.6 811.8h-3.7c-0.4 0-0.7 0-1.1-0.1-57.7-1.4-117.5-26.9-129.5-32.2H192c-5.5 0-10-4.5-10-10V641.3c0-1.5 0.4-3 1-4.4 6.3-12.8 27.7-21.9 62.1-35.9 13-5.3 26.4-10.7 36.7-15.9 10.3-5.2 14.2-8.6 15.6-10.2V449.5c0-1.3 0.3-2.6 0.8-3.8 10.1-24.4 32.8-33.9 33.8-34.2 5.1-2.1 10.9 0.4 13 5.5 2.1 5.1-0.4 10.9-5.5 13-0.1 0-14.9 6.4-22.1 21.6v126.1c0 1.3-0.3 2.6-0.8 3.8-5.7 13.8-28.1 23.4-64 38-12.6 5.1-25.6 10.4-35.6 15.4-9.5 4.8-13.4 7.9-15 9.5v115.1h178.5c1.5 0 2.9 0.3 4.2 0.9 0.7 0.3 66.4 30.7 126 31.3 59.6-0.6 125.3-31 126-31.3 1.3-0.6 2.8-0.9 4.2-0.9h178.4V644.4c-1.6-1.6-5.5-4.8-14.7-9.5-9.8-5-22.5-10.3-34.8-15.4-35.2-14.6-57-24.2-62.7-38-0.5-1.2-0.8-2.5-0.8-3.8V451.6c-7.5-15.9-26.3-25.7-26.5-25.8-4.9-2.5-6.9-8.5-4.4-13.4s8.5-6.9 13.4-4.4c1.1 0.6 27 13.8 36.8 37.8 0.5 1.2 0.7 2.5 0.7 3.8V575c1.4 1.6 5.2 4.9 15.2 10.1 10.1 5.2 23.3 10.7 36 16 33.6 13.9 54.5 23.1 60.8 35.8 0.7 1.4 1 2.9 1 4.5v128.2c0 5.5-4.5 10-10 10H643.2c-12 5.4-71.8 30.9-129.5 32.2h-1.1z" fill="#5A3392" p-id="11948"></path><path d="M651 453.9s19 9.3 25.9 25.8v101.5c7.1 16.5 83.1 34.3 91.3 50.4v101.5H616s-55.8 25.6-106.8 25.6h3c-51 0-106.8-25.6-106.8-25.6H253.3V631.6c8.1-16.1 86.2-33.8 93.2-50.4V479.7c6.9-16.5 22.8-22.8 22.8-22.8" fill="#3CDEF6" p-id="11949"></path><path d="M384.5 646.7V290.2c0-45.5 81.9-180 127.4-180s127.4 134.5 127.4 180v356.5H384.5" fill="#EE746C" 
p-id="11950"></path><path d="M639.3 656.7H384.5c-5.5 0-10-4.5-10-10V290.2c0-27.6 24.3-76.5 48.3-112.6 19.2-28.9 56.4-77.4 89.1-77.4 32.7 0 69.9 48.5 89.1 77.4 24 36.1 48.3 85 48.3 112.6v356.5c0 5.5-4.5 10-10 10z m-244.8-20h234.8V290.2c0-19.7-18.9-62.4-45-101.5-27.3-41-56.4-68.5-72.4-68.5s-45.2 27.5-72.4 68.5c-26.1 39.1-45 81.8-45 101.5v346.5z" fill="#5A3392" p-id="11951"></path><path d="M635.6 290.2c0-45.5-81.9-180-127.4-180s-123.4 134.5-123.4 180h250.8z" fill="#FFFFFF" p-id="11952"></path><path d="M635.6 300.2H384.8c-5.5 0-10-4.5-10-10 0-27.8 23.2-76.5 46.2-112.5 13.8-21.5 28.2-39.8 41.7-53 16.8-16.3 32.1-24.6 45.4-24.6 13.4 0 28.8 8.2 46 24.5 13.9 13.2 28.8 31.5 43.1 52.9 24 36.1 48.3 85 48.3 112.6 0.1 5.6-4.4 10.1-9.9 10.1z m-239.7-20h228.5c-4.5-22.1-21.5-58.1-43.8-91.5-27.3-41-56.4-68.5-72.4-68.5-16.2 0-43.8 26.8-70.3 68.3-21.4 33.4-37.7 69.5-42 91.7z" fill="#5A3392" p-id="11953"></path><path d="M417.9 322.7h188V613h-188z" fill="#FB9761" p-id="11954"></path><path d="M511.9 768.7c-5.5 0-10-4.5-10-10V527.2c0-5.5 4.5-10 10-10s10 4.5 10 10v231.5c0 5.5-4.5 10-10 10zM192 845.6c-5.5 0-10-4.5-10-10v-39.1c0-5.5 4.5-10 10-10s10 4.5 10 10v39.1c0 5.5-4.5 10-10 10zM256 895.6c-5.5 0-10-4.5-10-10v-89.1c0-5.5 4.5-10 10-10s10 4.5 10 10v89.1c0 5.5-4.5 10-10 10zM320.1 865.6c-5.5 0-10-4.5-10-10v-59.1c0-5.5 4.5-10 10-10s10 4.5 10 10v59.1c0 5.5-4.5 10-10 10zM704.1 845.6c-5.5 0-10-4.5-10-10v-39.1c0-5.5 4.5-10 10-10s10 4.5 10 10v39.1c0 5.5-4.5 10-10 10zM768.1 895.6c-5.5 0-10-4.5-10-10v-89.1c0-5.5 4.5-10 10-10s10 4.5 10 10v89.1c0 5.5-4.5 10-10 10zM832.2 865.6c-5.5 0-10-4.5-10-10v-59.1c0-5.5 4.5-10 10-10s10 4.5 10 10v59.1c0 5.5-4.5 10-10 10z" fill="#5A3392" p-id="11955"></path><path d="M421 372.5c-5.5 0-10-4.5-10-10v-38.3c0-5.5 4.5-10 10-10h38.4c5.5 0 10 4.5 10 10s-4.5 10-10 10H431v28.3c0 5.5-4.5 10-10 10z" fill="#FFFFFF" p-id="11956"></path><path d="M421.4 392.5m-9.8 0a9.8 9.8 0 1 0 19.6 0 9.8 9.8 0 1 0-19.6 0Z" fill="#FFFFFF" p-id="11957"></path><path d="M511.9 472.1m-55.2 0a55.2 55.2 
0 1 0 110.4 0 55.2 55.2 0 1 0-110.4 0Z" fill="#E2FBFF" p-id="11958"></path><path d="M511.9 537.3c-36 0-65.2-29.2-65.2-65.2s29.2-65.2 65.2-65.2 65.2 29.2 65.2 65.2-29.2 65.2-65.2 65.2z m0-110.4c-24.9 0-45.2 20.3-45.2 45.2s20.3 45.2 45.2 45.2 45.2-20.3 45.2-45.2-20.3-45.2-45.2-45.2z" fill="#5A3392" p-id="11959"></path><path d="M511.9 472.1m-27.9 0a27.9 27.9 0 1 0 55.8 0 27.9 27.9 0 1 0-55.8 0Z" fill="#FFFFFF" p-id="11960"></path><path d="M330.1 742.6h-66.8c-5.5 0-10-4.5-10-10s4.5-10 10-10h66.8c5.5 0 10 4.5 10 10s-4.5 10-10 10z" fill="#FFFFFF" p-id="11961"></path></svg>

+ 4
- 0
requitement/.gitignore View File

@@ -0,0 +1,4 @@
.gmake
obj/
bin/
lib/

+ 62
- 0
requitement/Makefile View File

@@ -0,0 +1,62 @@
# Top-level build driver for the autodiff examples. Each example target
# (re)builds its object file under examples/ and the core library objects
# (node.o, graph.o) under root/, then links everything into bin/.
CXX = g++
BIN = bin
LIB = lib
LIB_NAME = autodiff
# NOTE(review): $(LIBS) is referenced in every link command below but is never
# assigned in this file — presumably expands empty; confirm intended.
OBJS_GRADIENT_DESCENT = root/obj/node.o root/obj/graph.o examples/obj/gradient_descent.o
OBJS_ANN = root/obj/node.o root/obj/graph.o examples/obj/ann.o
OBJS_GRADIENT = root/obj/node.o root/obj/graph.o examples/obj/gradient.o
OBJS_SPEED = root/obj/node.o root/obj/graph.o examples/obj/speed.o
OBJS_SIMPLE = root/obj/node.o root/obj/graph.o examples/obj/simple.o

# Build every example binary.
all : gradient_descent ann gradient speed simple

gradient_descent : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/gradient_descent.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/gradient_descent $(OBJS_GRADIENT_DESCENT) $(LIBS)

ann : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/ann.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/ann $(OBJS_ANN) $(LIBS)

gradient : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/gradient.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/gradient $(OBJS_GRADIENT) $(LIBS)

speed : $(BIN) root/include/vectmath.h root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/speed.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/speed $(OBJS_SPEED) $(LIBS)

simple : $(BIN) root/include/mor.h root/include/dor.h root/include/por.h
$(MAKE) -C examples obj obj/simple.o
$(MAKE) -C root obj obj/node.o obj/graph.o
$(CXX) -o $(BIN)/simple $(OBJS_SIMPLE) $(LIBS)

# Create the output directories on demand.
$(BIN) :
if [ ! -d $(BIN) ]; then mkdir $(BIN); fi

$(LIB) :
if [ ! -d $(LIB) ]; then mkdir $(LIB); fi

# Clean sub-builds first, then remove bin/.
clean :
$(MAKE) -C root clean
$(MAKE) -C examples clean
if [ -d $(BIN) ]; then rm $(BIN) -r; fi

# Archive the core objects into lib/libautodiff.a and copy the library plus
# its headers system-wide (requires sudo).
install : $(LIB)
ar rcs $(LIB)/lib$(LIB_NAME).a root/obj/graph.o root/obj/node.o
if [ ! -d /usr/local/include/$(LIB_NAME) ]; then sudo mkdir /usr/local/include/$(LIB_NAME); fi
sudo cp $(LIB)/lib$(LIB_NAME).a /usr/local/lib
sudo cp root/include/*.h /usr/local/include/$(LIB_NAME)

.PHONY : all
.PHONY : gradient_descent
.PHONY : ann
.PHONY : gradient
.PHONY : speed
.PHONY : simple
.PHONY : clean
.PHONY : install

+ 31
- 0
requitement/examples/Makefile View File

@@ -0,0 +1,31 @@
# Compile each example source into obj/. The library headers are listed as
# prerequisites so interface changes trigger a rebuild of the examples.
CXX = g++
ODIR = obj
CXXFLAGS = -std=c++11 -O3
OBJS = $(ODIR)/gradient_descent.o $(ODIR)/ann.o $(ODIR)/gradient.o $(ODIR)/speed.o $(ODIR)/simple.o

all : $(ODIR) $(OBJS)

$(ODIR)/gradient_descent.o : src/gradient_descent.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/ann.o : src/ann.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/gradient.o : src/gradient.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/speed.o : src/speed.cpp ../root/include/vectmath.h ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/simple.o : src/simple.cpp ../root/include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

# Create the object directory on demand.
$(ODIR) :
if [ ! -d $(ODIR) ]; then mkdir $(ODIR); fi

clean :
if [ -d $(ODIR) ]; then rm $(ODIR) -r; fi

.PHONY : all
.PHONY : clean


+ 126
- 0
requitement/examples/src/ann.cpp View File

@@ -0,0 +1,126 @@
#include <iostream>
#include <ctime>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

typedef std::vector<Node> Vector;
typedef std::vector<Vector> Matrix;

// Produce a Node holding a uniform pseudo-random value in [0, 1].
Node random_number(){
    double sample = static_cast<double>(rand()) / RAND_MAX;
    return sample;
}

// Hyperbolic tangent activation: tanh(x) = (1 - e^{-2x}) / (1 + e^{-2x}).
// The exponential is computed once and reused so that only a single exp
// node is recorded on the autodiff graph (the original evaluated the
// identical subexpression twice); the value and gradient are unchanged.
Node tan_h(Node& x){
    Node e = exp(-2*x);
    return (1-e)/(1+e);
}

// Sum of squared errors between targets and predictions.
// NOTE(review): despite the name, the sum is not divided by the element
// count, so this is SSE rather than a true mean — confirm intended.
Node mean_square_error(Vector& y_true, Vector& y_pred){
    Node loss;
    for(size_t i=0 ; i<y_true.size() ; i++){
        Node diff = y_true[i] - y_pred[i];
        loss += pow(diff, 2);
    }
    return loss;
}

struct Layer {
Matrix weights;
Matrix bias;
Node (*activation)(Node&);
int input_shape;
int output_shape;

Layer(int input, int output, Node (*activation)(Node&)){
this->activation = activation;
this->input_shape = input;
this->output_shape = output;
weights.resize(input, Vector(output));
bias.resize(1, Vector(output));
random_number >> weights;
random_number >> bias;
}

Matrix forward(Matrix& previous){
Matrix output = dot(previous, weights) + bias;
output = activation >> output;
return output;
}

void backward(Node& loss, const float& learning_rate){
weights -= learning_rate*loss.gradient(weights);
bias -= learning_rate*loss.gradient(bias);
}
};

// A feed-forward network: an ordered stack of fully connected layers
// trained per-sample with reverse-mode autodiff (see Layer::backward).
// Fixes vs. original: removed the unused local `p` in fit(), and the epoch
// loop now uses a signed index to match the signed `epochs` parameter
// (the original compared size_t against int).
struct Network {
    std::vector<Layer> layers;
    int input_shape;   // width of the input vectors; set by input_layer()
    Graph* graph;      // shared tape used for gradient recording

    Network(){
        graph = Graph::getInstance();
    }

    // Declare the size of the input vectors; must be called before add().
    void input_layer(int input_shape){
        this->input_shape = input_shape;
    }

    // Append a layer whose input width matches the previous layer's output
    // (or the declared input shape for the first layer).
    void add(int output_shape, Node (*activation)(Node&)){
        int input = layers.empty()?input_shape:layers.back().output_shape;
        layers.push_back(Layer(input, output_shape, activation));
    }

    // Forward-propagate every row of `input` and collect the outputs.
    Matrix run(Matrix& input){
        Matrix output(input.size());
        for(size_t j=0 ; j<input.size() ; j++){
            Matrix out = {input[j]};
            for(auto& lay : layers){
                out = lay.forward(out);
            }
            output[j] = out[0];
        }
        return output;
    }

    // Online training: for each sample do a forward pass, evaluate the loss,
    // update every layer, then reset the tape before the next sample.
    void fit(Matrix& input, Matrix& output, Node (*loss_function)(Vector&, Vector&), int epochs, float learning_rate){
        for(int i=0 ; i<epochs ; i++){
            std::cout << "\r" << i+1 << "/" << epochs;
            for(size_t j=0 ; j<input.size() ; j++){
                // compute input
                Matrix out = {input[j]};
                for(auto& lay : layers){
                    out = lay.forward(out);
                }

                // compute loss
                Node loss = loss_function(output[j], out[0]);

                // update parameters
                for(auto& lay : layers){
                    lay.backward(loss, learning_rate);
                }

                // discard the recorded graph so memory does not grow unboundedly
                graph->new_recording();
            }
        }
        std::cout << std::endl;
    }
};

int main(int argc, char const *argv[]) {
srand(time(NULL));

Matrix input = {{0,0},{0,1},{1,0},{1,1}};
Matrix output = {{0},{1},{1},{0}};

Network network;
network.input_layer(2);
network.add(3, tan_h);
network.add(1, tan_h);
network.fit(input, output, mean_square_error, 500, 0.1);

Matrix pred = network.run(input);
std::cout << pred << std::endl;
return 0;
}

+ 15
- 0
requitement/examples/src/gradient.cpp View File

@@ -0,0 +1,15 @@
#include <iostream>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// f(x,y,z) = (x-y)^2 + x*y*z
// Takes the vector by const reference: the original took it by value,
// copying every Node on each call even though f only reads its inputs.
// Callers passing an lvalue (as main does) are unaffected.
Node function(const std::vector<Node>& x){
    return pow(x[0]-x[1], 2) + x[0]*x[1]*x[2]; // (x-y)^2 + x*y*z
}

int main(int argc, char const *argv[]) {
std::vector<Node> x = {5,6,7};
Node f = function(x);
std::cout << "grad(f) = " << f.gradient(x) << std::endl;
return 0;
}

+ 27
- 0
requitement/examples/src/gradient_descent.cpp View File

@@ -0,0 +1,27 @@
#include <iostream>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// f(x,y) = x^2 + y^2 — a convex bowl with its minimum at the origin.
// Const reference instead of mutable reference: the function only reads
// its inputs; existing lvalue callers are unaffected.
Node function(const std::vector<Node>& x){
    return pow(x[0], 2) + pow(x[1], 2); // x^2 + y^2
}

// Minimize f(x,y) = x^2 + y^2 by plain gradient descent and print the result.
// Fix vs. original: the loop index was size_t while `epochs` is int,
// producing a signed/unsigned comparison warning; both are signed now.
int main(int argc, char const *argv[]) {
    Graph* graph = Graph::getInstance();

    // Start far from the optimum so the descent is visible.
    std::vector<Node> x = {50, 50};
    Node f;

    const int epochs = 30;
    const float learning_rate = 0.1;
    for(int i=0 ; i<epochs ; i++){
        f = function(x);
        x -= learning_rate*f.gradient(x);
        graph->new_recording();  // drop the old tape before the next step
    }

    std::cout << "f = " << f << std::endl;
    std::cout << "x = " << x << std::endl;
    return 0;
}

+ 13
- 0
requitement/examples/src/simple.cpp View File

@@ -0,0 +1,13 @@
#include <iostream>
#include "../../root/include/node.h"

int main(int argc, char const *argv[]) {
Node x=2, y=3;
Node f = x*y + sin(x);

std::cout << "f(x,y) = x*y + sin(x)" << std::endl;
std::cout << "f(" << x << "," << y << ") = " << f << std::endl;
std::cout << "∂f/∂x = " << f.gradient(x) << std::endl;
std::cout << "∂f/∂y = " << f.gradient(y) << std::endl;
return 0;
}

+ 51
- 0
requitement/examples/src/speed.cpp View File

@@ -0,0 +1,51 @@
#include <iostream>
#include <iomanip>
#include <cassert>
#include <ctime>
#include <chrono>

#include "../../root/include/vectmath.h"
#include "../../root/include/node.h"

// Build a height x width matrix of uniform [0,1] pseudo-random values.
// The third parameter exists only so the call site can pick T by argument
// deduction (e.g. get_random_matrix(n, n, Node())); its value is never
// read, so it is left unnamed to silence the unused-parameter warning
// the original produced.
template <class T>
std::vector<std::vector<T> > get_random_matrix(const int& height, const int& width, T){
    std::vector<std::vector<T> > mat(height, std::vector<T>(width));
    for(auto& v : mat){
        for(auto& e : v){
            e = rand()/(double)RAND_MAX;  // implicit double -> T conversion
        }
    }
    return mat;
}

// Compare dot() throughput on plain double matrices vs Node matrices.
int main(int argc, char const *argv[]) {
    srand(time(0));

    const int size = 30;
    auto a = get_random_matrix(size, size, double());
    auto b = get_random_matrix(size, size, double());
    auto c = get_random_matrix(size, size, Node());
    auto d = get_random_matrix(size, size, Node());

    std::cout << std::fixed << std::setprecision(10);

    // Time the plain-double multiplication.
    std::cout << "Running with double...\t";
    std::cout.flush();
    auto start = std::chrono::high_resolution_clock::now();
    std::vector<std::vector<double> > ab = dot(a, b);
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = finish - start;
    std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl;

    // Time the same multiplication with autodiff Nodes.
    std::cout << "Running with Node...\t";
    std::cout.flush();
    start = std::chrono::high_resolution_clock::now();
    std::vector<std::vector<Node> > cd = dot(c, d);
    finish = std::chrono::high_resolution_clock::now();
    elapsed = finish - start;
    std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl;

    std::cout << "Yet to be improved..." << std::endl;
    return 0;
}

+ 22
- 0
requitement/root/Makefile View File

@@ -0,0 +1,22 @@
# Build the autodiff core objects (graph.o, node.o) into obj/.
CXX = g++
ODIR = obj
CXXFLAGS = -std=c++11 -O3
OBJS = $(ODIR)/graph.o $(ODIR)/node.o

all : $(ODIR) $(OBJS)

$(ODIR)/graph.o : src/graph.cpp include/graph.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

$(ODIR)/node.o : src/node.cpp include/node.h
$(CXX) -c $< -o $@ $(CXXFLAGS)

# Create the object directory on demand.
$(ODIR) :
if [ ! -d $(ODIR) ]; then mkdir $(ODIR); fi

clean :
if [ -d $(ODIR) ]; then rm $(ODIR) -r; fi

.PHONY : all
.PHONY : clean


+ 16
- 0
requitement/root/include/dor.h View File

@@ -0,0 +1,16 @@
#ifndef DYADIC_OPERATION_RESULT
#define DYADIC_OPERATION_RESULT

/// Result of a binary (dyadic) operation on two autodiff operands:
/// the computed value plus the local partial derivative with respect
/// to each operand. Uses a member initializer list instead of
/// assignment in the constructor body (idiomatic; avoids
/// default-construct-then-assign).
struct DyadicOperationResult {
    double value;       ///< f(l, r)
    double left_grad;   ///< df/dl
    double right_grad;  ///< df/dr

    DyadicOperationResult(double value, double left_grad, double right_grad)
        : value(value), left_grad(left_grad), right_grad(right_grad) {}
};

#endif /* end of include guard: DYADIC_OPERATION_RESULT */

+ 26
- 0
requitement/root/include/graph.h View File

@@ -0,0 +1,26 @@
#ifndef GRAPH_H
#define GRAPH_H

#include <map>
#include <vector>
#include <utility>

// Singleton tape that records the computation graph for reverse-mode
// autodiff. Member functions are defined in graph.cpp (not visible here).
class Graph {
private:
// uid -> incoming edges; each edge pairs a double with a parent uid —
// presumably (local gradient, parent node id); confirm in graph.cpp.
std::map<long int, std::vector<std::pair<double, long int> > > nodes;
static Graph* instance;  // lazily created singleton storage
Graph();                 // private: construct only through getInstance()

public:
static long int uid_counter;  // source of fresh node ids
static long int uid();        // hand out the next unique id
static Graph* getInstance();

// Attach an edge to the node identified by `uid`.
void connect(const long int& uid, const std::pair<double, long int>& edge);
std::vector<std::pair<double, long int> > get(const long int& uid) const;
bool has(const long int& uid) const;

// Discard the recorded tape and start a fresh recording.
void new_recording();
};

#endif /* end of include guard: GRAPH_H */

+ 14
- 0
requitement/root/include/mor.h View File

@@ -0,0 +1,14 @@
#ifndef MONADIC_OPERATION_RESULT
#define MONADIC_OPERATION_RESULT

/// Result of a unary (monadic) operation on one autodiff operand:
/// the computed value plus the local derivative with respect to the
/// operand. Uses a member initializer list instead of assignment in
/// the constructor body (idiomatic; avoids default-construct-then-assign).
struct MonadicOperationResult {
    double value;  ///< f(x)
    double grad;   ///< df/dx

    MonadicOperationResult(double value, double grad)
        : value(value), grad(grad) {}
};

#endif /* end of include guard: MONADIC_OPERATION_RESULT */

+ 72
- 0
requitement/root/include/node.h View File

@@ -0,0 +1,72 @@
#ifndef NODE_H
#define NODE_H

#include <cmath>
#include <iostream>

#include "graph.h"
#include "mor.h"
#include "dor.h"
#include "por.h"

// A scalar on the autodiff tape. Arithmetic on Nodes records operations
// into the singleton Graph so gradients can later be queried with
// gradient(). Member functions are defined in node.cpp (not visible here).
class Node {
private:
double value;  // the scalar payload
long int uid;  // identity of this node on the Graph tape

// Walks the graph from current_uid back toward stop_uid, accumulating
// the chain-rule product — presumably the core of gradient(); confirm
// in node.cpp.
double gradient_recursive(Graph* graph, const long int& current_uid, const long int& stop_uid) const;

public:
Node(const double& value=0);  // implicit on purpose: lets doubles mix into Node expressions
Node(const Node& node);

// Factory helpers that record a 1-, 2- or n-operand operation together
// with its local derivative(s) (see mor.h / dor.h / por.h).
static Node monadic_operation(const Node& n, MonadicOperationResult (*)(const double&));
static Node dyadic_operation(const Node& l, const Node& r, DyadicOperationResult (*)(const double&, const double&));
static Node polyadic_operation(const std::vector<Node>& nodes, PolyadicOperationResult (*)(const std::vector<double>&));

// Derivative of this node with respect to one node, a vector of nodes,
// or a matrix of nodes.
double gradient(const Node& node) const;
std::vector<double> gradient(const std::vector<Node>& nodes) const;
std::vector<std::vector<double> > gradient(const std::vector<std::vector<Node> >& nodes) const;

// Recorded arithmetic.
friend Node operator+(const Node& l, const Node& r);
friend Node operator-(const Node& l, const Node& r);
friend Node operator*(const Node& l, const Node& r);
friend Node operator/(const Node& l, const Node& r);

Node& operator+=(const Node& r);
Node& operator-=(const Node& r);
Node& operator*=(const Node& r);
Node& operator/=(const Node& r);

// Comparisons — NOTE(review): operator!= is not declared; confirm whether
// that is intentional.
friend bool operator==(const Node& l, const Node& r);
friend bool operator<(const Node& l, const Node& r);
friend bool operator>(const Node& l, const Node& r);
friend bool operator<=(const Node& l, const Node& r);
friend bool operator>=(const Node& l, const Node& r);

// Recorded elementary functions.
friend Node sin(const Node& x);
friend Node cos(const Node& x);
friend Node tan(const Node& x);
friend Node sinh(const Node& x);
friend Node cosh(const Node& x);
friend Node tanh(const Node& x);
friend Node asin(const Node& x);
friend Node acos(const Node& x);
friend Node atan(const Node& x);

friend Node log(const Node& x, const Node& base);
friend Node log10(const Node& x);
friend Node ln(const Node& x);

friend Node pow(const Node& x, const Node& p);
friend Node exp(const Node& x);
friend Node sqrt(const Node& x);

friend Node abs(const Node& x);
friend Node min(const Node& l, const Node& r);
friend Node max(const Node& l, const Node& r);

friend std::ostream& operator<<(std::ostream& os, const Node& node);
};

#endif /* end of include guard: NODE_H */

+ 16
- 0
requitement/root/include/por.h View File

@@ -0,0 +1,16 @@
#ifndef POLYADIC_OPERATION_RESULT
#define POLYADIC_OPERATION_RESULT

#include <vector>

// Outcome of an n-ary (polyadic) elementary operation: the computed
// value together with one partial derivative per operand, in the same
// order as the operands were supplied.
struct PolyadicOperationResult {
    double value;                  // f(x1, ..., xn)
    std::vector<double> gradients; // { df/dx1, ..., df/dxn }

    // Packs the function value and its per-operand gradients.
    PolyadicOperationResult(double value, const std::vector<double>& gradients)
        : value(value), gradients(gradients) {}
};

#endif /* end of include guard: POLYADIC_OPERATION_RESULT */

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save