
client_command.go 21 kB

package main

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"

	"gitlink.org.cn/cloudream/client/config"
	agentcaller "gitlink.org.cn/cloudream/proto"
	racli "gitlink.org.cn/cloudream/rabbitmq/client"
	"gitlink.org.cn/cloudream/utils"
	"gitlink.org.cn/cloudream/utils/consts"
	"gitlink.org.cn/cloudream/utils/consts/errorcode"
	myio "gitlink.org.cn/cloudream/utils/io"

	"gitlink.org.cn/cloudream/ec"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/grpclb"
	"google.golang.org/grpc/credentials/insecure"
)
func Move(bucketName string, objectName string, stgID int) error {
	// TODO: this is a hard-coded constant
	userId := 0

	// First, request the file's metadata from the coordinator
	coorClient, err := racli.NewCoordinatorClient()
	if err != nil {
		return fmt.Errorf("create coordinator client failed, err: %w", err)
	}
	defer coorClient.Close()

	moveResp, err := coorClient.Move(bucketName, objectName, userId, stgID)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if moveResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator operation failed, code: %s, message: %s", moveResp.ErrorCode, moveResp.Message)
	}

	// Then send the move request to the agent
	agentClient, err := racli.NewAgentClient(moveResp.NodeID)
	if err != nil {
		return fmt.Errorf("create agent client to %d failed, err: %w", stgID, err)
	}
	defer agentClient.Close()

	switch moveResp.Redundancy {
	case consts.REDUNDANCY_REP:
		agentMoveResp, err := agentClient.RepMove(moveResp.Directory, moveResp.Hashes, bucketName, objectName, userId, moveResp.FileSizeInBytes)
		if err != nil {
			return fmt.Errorf("request to agent %d failed, err: %w", stgID, err)
		}
		if agentMoveResp.ErrorCode != errorcode.OK {
			return fmt.Errorf("agent %d operation failed, code: %s, message: %s", stgID, agentMoveResp.ErrorCode, agentMoveResp.Message)
		}

	case consts.REDUNDANCY_EC:
		agentMoveResp, err := agentClient.ECMove(moveResp.Directory, moveResp.Hashes, moveResp.IDs, moveResp.ECName, bucketName, objectName, userId, moveResp.FileSizeInBytes)
		if err != nil {
			return fmt.Errorf("request to agent %d failed, err: %w", stgID, err)
		}
		if agentMoveResp.ErrorCode != errorcode.OK {
			return fmt.Errorf("agent %d operation failed, code: %s, message: %s", stgID, agentMoveResp.ErrorCode, agentMoveResp.Message)
		}
	}

	return nil
}
func Read(localFilePath string, bucketName string, objectName string) error {
	// TODO: this is a hard-coded constant
	userId := 0

	// First, request the file-related data from the coordinator
	coorClient, err := racli.NewCoordinatorClient()
	if err != nil {
		return fmt.Errorf("create coordinator client failed, err: %w", err)
	}
	defer coorClient.Close()

	readResp, err := coorClient.Read(bucketName, objectName, userId)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if readResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator operation failed, code: %s, message: %s", readResp.ErrorCode, readResp.Message)
	}

	switch readResp.Redundancy {
	case consts.REDUNDANCY_REP:
		if len(readResp.NodeIPs) == 0 {
			return fmt.Errorf("no node has this file")
		}

		// Arbitrarily pick the first node to download the file from
		err = repRead(readResp.FileSizeInBytes, readResp.NodeIPs[0], readResp.Hashes[0], localFilePath)
		if err != nil {
			return fmt.Errorf("rep read failed, err: %w", err)
		}

	case consts.REDUNDANCY_EC:
		// TODO: the EC code path should be refactored
		ecRead(readResp.FileSizeInBytes, readResp.NodeIPs, readResp.Hashes, readResp.BlockIDs, readResp.ECName, localFilePath)
	}

	return nil
}
func repRead(fileSizeInBytes int64, nodeIP string, repHash string, localFilePath string) error {
	// Connect to the node's gRPC server
	grpcAddr := fmt.Sprintf("%s:%d", nodeIP, config.Cfg().GRPCPort)
	conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
	}
	defer conn.Close()

	// Create the local output file
	curExecPath, err := os.Executable()
	if err != nil {
		return fmt.Errorf("get executable directory failed, err: %w", err)
	}

	outputFilePath := filepath.Join(filepath.Dir(curExecPath), localFilePath)
	outputFileDir := filepath.Dir(outputFilePath)

	err = os.MkdirAll(outputFileDir, os.ModePerm)
	if err != nil {
		return fmt.Errorf("create output file directory %s failed, err: %w", outputFileDir, err)
	}

	outputFile, err := os.Create(outputFilePath)
	if err != nil {
		return fmt.Errorf("create output file %s failed, err: %w", outputFilePath, err)
	}
	defer outputFile.Close()

	/*
		TODO: check whether a local ipfs daemon exists and whether the object's cid can be obtained.
		If so, fetch the cids of the ipfs blocks that make up the object and retrieve those blocks over the ipfs network.
		Otherwise, fall back to fetching from the specified node over grpc, as is done now.
	*/

	// Download the file
	client := agentcaller.NewFileTransportClient(conn)
	stream, err := client.GetFile(context.Background(), &agentcaller.GetReq{
		FileHash: repHash,
	})
	if err != nil {
		return fmt.Errorf("request grpc failed, err: %w", err)
	}
	defer stream.CloseSend()

	for {
		resp, err := stream.Recv()
		if err != nil {
			return fmt.Errorf("read file data on grpc stream failed, err: %w", err)
		}

		if resp.Type == agentcaller.FileDataPacketType_Data {
			err = myio.WriteAll(outputFile, resp.Data)
			// TODO: if writing fails, should the incomplete file be deleted?
			if err != nil {
				return fmt.Errorf("write file data to local file failed, err: %w", err)
			}
		} else if resp.Type == agentcaller.FileDataPacketType_EOF {
			return nil
		}
	}
}
func ecRead(fileSizeInBytes int64, nodeIPs []string, blockHashs []string, blockIds []int, ecName string, localFilePath string) {
	// Look up the coding parameters by ecName
	wg := sync.WaitGroup{}
	ecPolicies := *utils.GetEcPolicy()
	ecPolicy := ecPolicies[ecName]
	fmt.Println(ecPolicy)
	ecK := ecPolicy.GetK()
	ecN := ecPolicy.GetN()
	var coefs = [][]int64{{1, 1, 1}, {1, 2, 3}} // the 2 should be replaced by ecK and the 3 by ecN
	numPacket := (fileSizeInBytes + int64(ecK)*config.Cfg().GRCPPacketSize - 1) / (int64(ecK) * config.Cfg().GRCPPacketSize)
	fmt.Println(numPacket)

	// Create the channels
	getBufs := make([]chan []byte, ecN)
	decodeBufs := make([]chan []byte, ecK)
	for i := 0; i < ecN; i++ {
		getBufs[i] = make(chan []byte)
	}
	for i := 0; i < ecK; i++ {
		decodeBufs[i] = make(chan []byte)
	}

	// Which coded blocks are available (obtained from the coordinator)
	//var blockSeq = []int{0,1}
	blockSeq := blockIds

	wg.Add(1)
	for i := 0; i < len(blockSeq); i++ {
		go get(blockHashs[i], nodeIPs[i], getBufs[blockSeq[i]], numPacket)
	}
	go decode(getBufs[:], decodeBufs[:], blockSeq, ecK, coefs, numPacket)
	go persist(decodeBufs[:], numPacket, localFilePath, &wg)
	wg.Wait()
}
type fileSender struct {
	grpcCon  *grpc.ClientConn
	stream   agentcaller.FileTransport_SendFileClient
	nodeIP   string
	fileHash string
	err      error
}
func RepWrite(localFilePath string, bucketName string, objectName string, numRep int) error {
	// TODO: this is a hard-coded constant
	userId := 0

	// Get the file size
	fileInfo, err := os.Stat(localFilePath)
	if err != nil {
		return fmt.Errorf("get file %s state failed, err: %w", localFilePath, err)
	}
	fileSizeInBytes := fileInfo.Size()

	coorClient, err := racli.NewCoordinatorClient()
	if err != nil {
		return fmt.Errorf("create coordinator client failed, err: %w", err)
	}
	defer coorClient.Close()

	// Send the write request and ask the coordinator to assign the node IPs to write to
	repWriteResp, err := coorClient.RepWrite(bucketName, objectName, fileSizeInBytes, numRep, userId)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if repWriteResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator RepWrite failed, code: %s, message: %s", repWriteResp.ErrorCode, repWriteResp.Message)
	}

	file, err := os.Open(localFilePath)
	if err != nil {
		return fmt.Errorf("open file %s failed, err: %w", localFilePath, err)
	}
	defer file.Close()

	/*
		TODO ss: check whether a local ipfs daemon exists, whether it can reach the target agent's ipfs daemon, and whether the local ipfs directory has enough space.
		If so, write everything into the local ipfs directory to obtain the object's cid, then send the cid to the target agent so that it pins the object.
		Otherwise, fall back to using grpc against the specified node, as is done now.
	*/

	senders := make([]fileSender, numRep)

	// Establish the gRPC connections and start the send requests
	startSendFile(numRep, senders, repWriteResp.NodeIPs)

	// Send the data to every node
	err = sendFileData(file, numRep, senders)
	if err != nil {
		return err
	}

	// Send the EOF message and obtain the FileHash
	sendFinish(numRep, senders)

	// Collect the nodes that succeeded and the hashes they returned
	var sucNodeIPs []string
	var sucFileHashes []string
	for i := 0; i < numRep; i++ {
		sender := &senders[i]
		if sender.err == nil {
			sucNodeIPs = append(sucNodeIPs, sender.nodeIP)
			sucFileHashes = append(sucFileHashes, sender.fileHash)
		}
	}

	// Record the hashes of the written file
	// TODO: if no replica was written successfully, should this request still be sent?
	writeRepHashResp, err := coorClient.WriteRepHash(bucketName, objectName, sucFileHashes, sucNodeIPs, userId)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if writeRepHashResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator WriteRepHash failed, code: %s, message: %s", writeRepHashResp.ErrorCode, writeRepHashResp.Message)
	}

	return nil
}
func startSendFile(numRep int, senders []fileSender, nodeIPs []string) {
	for i := 0; i < numRep; i++ {
		sender := &senders[i]
		sender.nodeIP = nodeIPs[i]

		grpcAddr := fmt.Sprintf("%s:%d", nodeIPs[i], config.Cfg().GRPCPort)
		conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			sender.err = fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
			continue
		}

		client := agentcaller.NewFileTransportClient(conn)
		stream, err := client.SendFile(context.Background())
		if err != nil {
			conn.Close()
			sender.err = fmt.Errorf("request to send file failed, err: %w", err)
			continue
		}

		sender.grpcCon = conn
		sender.stream = stream
	}
}
func sendFileData(file *os.File, numRep int, senders []fileSender) error {
	// Shared send buffer
	buf := make([]byte, 2048)
	for {
		// Read a chunk of file data
		readCnt, err := file.Read(buf)

		// End of file reached
		if err == io.EOF {
			break
		}
		if err != nil {
			// On read failure, tear down all remaining connections
			for i := 0; i < numRep; i++ {
				sender := &senders[i]
				if sender.err != nil {
					continue
				}

				sender.stream.CloseSend()
				sender.grpcCon.Close()
				sender.err = fmt.Errorf("read file data failed, err: %w", err)
			}
			return fmt.Errorf("read file data failed, err: %w", err)
		}

		// Send the data to each node in parallel
		hasSender := false
		var sendWg sync.WaitGroup
		for i := 0; i < numRep; i++ {
			sender := &senders[i]
			// Skip senders that have already failed
			if sender.err != nil {
				continue
			}

			hasSender = true
			sendWg.Add(1)
			go func() {
				err := sender.stream.Send(&agentcaller.FileDataPacket{
					Type: agentcaller.FileDataPacketType_Data,
					Data: buf[:readCnt],
				})
				// Close the connection on error
				if err != nil {
					sender.stream.CloseSend()
					sender.grpcCon.Close()
					sender.err = fmt.Errorf("send file data failed, err: %w", err)
				}
				sendWg.Done()
			}()
		}

		// Wait until the data has been sent to every node
		sendWg.Wait()

		// If sending failed for every node, stop reading the file
		if !hasSender {
			break
		}
	}

	return nil
}
func sendFinish(numRep int, senders []fileSender) {
	for i := 0; i < numRep; i++ {
		sender := &senders[i]
		// Skip senders that have already failed
		if sender.err != nil {
			continue
		}

		err := sender.stream.Send(&agentcaller.FileDataPacket{
			Type: agentcaller.FileDataPacketType_EOF,
		})
		if err != nil {
			sender.stream.CloseSend()
			sender.grpcCon.Close()
			sender.err = fmt.Errorf("send file data failed, err: %w", err)
			continue
		}

		resp, err := sender.stream.CloseAndRecv()
		if err != nil {
			sender.err = fmt.Errorf("receive response failed, err: %w", err)
			sender.grpcCon.Close()
			continue
		}

		sender.fileHash = resp.FileHash
		sender.grpcCon.Close()
	}
}
func EcWrite(localFilePath string, bucketName string, objectName string, ecName string) error {
	fmt.Println("write " + localFilePath + " as " + bucketName + "/" + objectName)
	// TODO: error handling should follow the logic of the RepWrite function

	// Get the file size
	fileInfo, err := os.Stat(localFilePath)
	if err != nil {
		return fmt.Errorf("get file %s state failed, err: %w", localFilePath, err)
	}
	fileSizeInBytes := fileInfo.Size()

	// Use the erasure-coding library to get the coding parameters and generator matrix
	ecPolicies := *utils.GetEcPolicy()
	ecPolicy := ecPolicies[ecName]
	ipss := utils.GetAgentIps()
	fmt.Println(ipss)
	print("@!@!@!@!@!@!")
	//var policy utils.EcConfig
	//policy = ecPolicy[0]
	ecK := ecPolicy.GetK()
	ecN := ecPolicy.GetN()
	//const ecK int = ecPolicy.GetK()
	//const ecN int = ecPolicy.GetN()
	var coefs = [][]int64{{1, 1, 1}, {1, 2, 3}} // the 2 should be replaced by ecK and the 3 by ecN

	// Compute the number of packets per block
	numPacket := (fileSizeInBytes + int64(ecK)*config.Cfg().GRCPPacketSize - 1) / (int64(ecK) * config.Cfg().GRCPPacketSize)
	fmt.Println(numPacket)

	userId := 0

	coorClient, err := racli.NewCoordinatorClient()
	if err != nil {
		return fmt.Errorf("create coordinator client failed, err: %w", err)
	}
	defer coorClient.Close()

	// Send the write request and ask the coordinator to assign the node IPs to write to
	ecWriteResp, err := coorClient.ECWrite(bucketName, objectName, fileSizeInBytes, ecName, userId)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if ecWriteResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator ECWrite failed, code: %s, message: %s", ecWriteResp.ErrorCode, ecWriteResp.Message)
	}

	// Create the channels
	loadBufs := make([]chan []byte, ecN)
	encodeBufs := make([]chan []byte, ecN)
	for i := 0; i < ecN; i++ {
		loadBufs[i] = make(chan []byte)
	}
	for i := 0; i < ecN; i++ {
		encodeBufs[i] = make(chan []byte)
	}
	hashs := make([]string, ecN)

	// Start the actual write
	go load(localFilePath, loadBufs[:ecN], ecK, numPacket*int64(ecK), fileSizeInBytes) // load data from the local file system
	go encode(loadBufs[:ecN], encodeBufs[:ecN], ecK, coefs, numPacket)

	var wg sync.WaitGroup
	wg.Add(ecN)
	for i := 0; i < ecN; i++ {
		go send(ecWriteResp.NodeIPs[i], encodeBufs[i], numPacket, &wg, hashs, i)
	}
	wg.Wait()

	// Second round of communication: record the block hashes as metadata
	writeRepHashResp, err := coorClient.WriteECHash(bucketName, objectName, hashs, ecWriteResp.NodeIPs, userId)
	if err != nil {
		return fmt.Errorf("request to coordinator failed, err: %w", err)
	}
	if writeRepHashResp.ErrorCode != errorcode.OK {
		return fmt.Errorf("coordinator WriteECHash failed, code: %s, message: %s", writeRepHashResp.ErrorCode, writeRepHashResp.Message)
	}

	return nil
}
func load(localFilePath string, loadBufs []chan []byte, ecK int, totalNumPacket int64, fileSizeInBytes int64) {
	fmt.Println("load " + localFilePath)
	file, _ := os.Open(localFilePath)

	for i := 0; int64(i) < totalNumPacket; i++ {
		print(totalNumPacket)
		buf := make([]byte, config.Cfg().GRCPPacketSize)
		idx := i % ecK
		print(len(loadBufs))
		_, err := file.Read(buf)
		loadBufs[idx] <- buf
		if idx == ecK-1 {
			print("***")
			for j := ecK; j < len(loadBufs); j++ {
				print(j)
				zeroPkt := make([]byte, config.Cfg().GRCPPacketSize)
				fmt.Printf("%v", zeroPkt)
				loadBufs[j] <- zeroPkt
			}
		}
		if err != nil && err != io.EOF {
			break
		}
	}
	fmt.Println("load over")
	for i := 0; i < len(loadBufs); i++ {
		print(i)
		close(loadBufs[i])
	}
	file.Close()
}
func encode(inBufs []chan []byte, outBufs []chan []byte, ecK int, coefs [][]int64, numPacket int64) {
	fmt.Println("encode ")
	var tmpIn [][]byte
	tmpIn = make([][]byte, len(outBufs))
	enc := ec.NewRsEnc(ecK, len(outBufs))
	for i := 0; int64(i) < numPacket; i++ {
		for j := 0; j < len(outBufs); j++ {
			tmpIn[j] = <-inBufs[j]
			//print(i)
			//fmt.Printf("%v",tmpIn[j])
			//print("@#$")
		}
		enc.Encode(tmpIn)
		fmt.Printf("%v", tmpIn)
		print("$$$$$$$$$$$$$$$$$$")
		for j := 0; j < len(outBufs); j++ {
			// Illustrative; the erasure-coding engine should compute: tmp[k] = tmp[k] + (tmpIn[w][k] * coefs[w][j])
			outBufs[j] <- tmpIn[j]
		}
	}
	fmt.Println("encode over")
	for i := 0; i < len(outBufs); i++ {
		close(outBufs[i])
	}
}
func decode(inBufs []chan []byte, outBufs []chan []byte, blockSeq []int, ecK int, coefs [][]int64, numPacket int64) {
	fmt.Println("decode ")
	var tmpIn [][]byte
	var zeroPkt []byte
	tmpIn = make([][]byte, len(inBufs))
	hasBlock := map[int]bool{}
	for j := 0; j < len(blockSeq); j++ {
		hasBlock[blockSeq[j]] = true
	}
	needRepair := false // whether some data blocks are missing and must be repaired
	for j := 0; j < len(outBufs); j++ {
		if blockSeq[j] != j {
			needRepair = true
		}
	}
	enc := ec.NewRsEnc(ecK, len(inBufs))
	for i := 0; int64(i) < numPacket; i++ {
		for j := 0; j < len(inBufs); j++ {
			if hasBlock[j] {
				tmpIn[j] = <-inBufs[j]
			} else {
				tmpIn[j] = zeroPkt
			}
		}
		fmt.Printf("%v", tmpIn)
		if needRepair {
			err := enc.Repair(tmpIn)
			print("&&&&&")
			if err != nil {
				fmt.Fprintf(os.Stderr, "Decode Repair Error: %s", err.Error())
			}
		}
		//fmt.Printf("%v",tmpIn)
		for j := 0; j < len(outBufs); j++ {
			// Illustrative; the erasure-coding engine should compute: tmp[k] = tmp[k] + (tmpIn[w][k] * coefs[w][j])
			outBufs[j] <- tmpIn[j]
		}
	}
	fmt.Println("decode over")
	for i := 0; i < len(outBufs); i++ {
		close(outBufs[i])
	}
}
func send(ip string, inBuf chan []byte, numPacket int64, wg *sync.WaitGroup, hashs []string, idx int) error {
	/*
		TODO ss: check whether a local ipfs daemon exists, whether it can reach the target agent's ipfs daemon, and whether the local ipfs directory has enough space.
		If so, write everything into the local ipfs directory to obtain the object's cid, then send the cid to the target agent so that it pins the object.
		Otherwise, fall back to using grpc against the specified node, as is done now.
	*/
	// TODO: errors should be propagated back to the caller
	defer wg.Done()

	grpcAddr := fmt.Sprintf("%s:%d", ip, config.Cfg().GRPCPort)
	conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
	}
	defer conn.Close()

	client := agentcaller.NewFileTransportClient(conn)
	stream, err := client.SendFile(context.Background())
	if err != nil {
		return fmt.Errorf("request to send file failed, err: %w", err)
	}

	for i := 0; int64(i) < numPacket; i++ {
		buf := <-inBuf
		err := stream.Send(&agentcaller.FileDataPacket{
			Code: agentcaller.FileDataPacket_OK,
			Data: buf,
		})
		if err != nil {
			stream.CloseSend()
			return fmt.Errorf("send file data failed, err: %w", err)
		}
	}

	err = stream.Send(&agentcaller.FileDataPacket{
		Code: agentcaller.FileDataPacket_EOF,
	})
	if err != nil {
		stream.CloseSend()
		return fmt.Errorf("send file data failed, err: %w", err)
	}

	resp, err := stream.CloseAndRecv()
	if err != nil {
		return fmt.Errorf("receive response failed, err: %w", err)
	}

	hashs[idx] = resp.FileHash
	return nil
}
func get(blockHash string, nodeIP string, getBuf chan []byte, numPacket int64) error {
	grpcAddr := fmt.Sprintf("%s:%d", nodeIP, config.Cfg().GRPCPort)
	conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
	}
	defer conn.Close()

	/*
		TODO: check whether a local ipfs daemon exists and whether the cid of this coded block can be obtained.
		If so, fetch the cids of the ipfs blocks that make up the coded block and retrieve them over the ipfs network.
		Otherwise, fall back to fetching from the specified node over grpc, as is done now.
	*/
	client := agentcaller.NewFileTransportClient(conn)

	// rpc get
	// TODO: consider how to abort the downstream decoding when the read fails
	stream, err := client.GetFile(context.Background(), &agentcaller.GetReq{
		FileHash: blockHash,
	})
	if err != nil {
		return fmt.Errorf("request grpc failed, err: %w", err)
	}

	for i := 0; int64(i) < numPacket; i++ {
		fmt.Println(i)
		// TODO: same as above
		res, _ := stream.Recv()
		fmt.Println(res.BlockOrReplicaData)
		getBuf <- res.BlockOrReplicaData
	}
	close(getBuf)
	return nil
}
func persist(inBuf []chan []byte, numPacket int64, localFilePath string, wg *sync.WaitGroup) {
	fDir, err := os.Executable()
	if err != nil {
		panic(err)
	}

	fURL := filepath.Join(filepath.Dir(fDir), "assets")
	_, err = os.Stat(fURL)
	if os.IsNotExist(err) {
		os.MkdirAll(fURL, os.ModePerm)
	}

	file, err := os.Create(filepath.Join(fURL, localFilePath))
	if err != nil {
		return
	}

	for i := 0; int64(i) < numPacket; i++ {
		for j := 0; j < len(inBuf); j++ {
			tmp := <-inBuf[j]
			fmt.Println(tmp)
			file.Write(tmp)
		}
	}
	file.Close()
	wg.Done()
}

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (simply install the out-of-the-box inter-cloud storage client, with no need to worry about deploying any other components), while also allowing users to flexibly and conveniently customize the details of the inter-cloud storage functionality.