You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

block.go 4.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190
  1. package ec
  2. import (
  3. "errors"
  4. "io"
  5. "io/ioutil"
  6. "gitlink.org.cn/cloudream/common/pkgs/ipfs"
  7. "gitlink.org.cn/cloudream/common/pkgs/logger"
  8. stgglb "gitlink.org.cn/cloudream/storage/common/globals"
  9. )
// BlockReader reads erasure-coding blocks out of IPFS, either directly by
// block hash or by "jump reading" striped chunks of the original file
// (see SetJumpRead / JumpFetchBlock).
type BlockReader struct {
	// ipfsCli is a pooled IPFS client; released by Close.
	ipfsCli *ipfs.PoolClient
	/* Properties describing how the file is split into blocks */
	// fileHash is the IPFS hash of the whole (un-split) file.
	fileHash string
	// fileSize is the total file size in bytes.
	fileSize int64
	// ecK is the number of data blocks the file is split into.
	ecK int
	// chunkSize is the stripe unit size in bytes (default 256 KiB).
	chunkSize int64
	/* Options */
	// jumpReadOpt selects the read strategy. Per SetJumpRead/JumpFetchBlock,
	// true enables jump reading of the original file; false (the default,
	// "fast" mode) fetches blocks directly by hash.
	// NOTE(review): the original comment called this "fastRead" with the
	// opposite meaning, and FetchBlock1 also interprets it the opposite
	// way — confirm the intended semantics.
	jumpReadOpt bool
}
  25. func NewBlockReader() (*BlockReader, error) {
  26. ipfsClient, err := stgglb.IPFSPool.Acquire()
  27. if err != nil {
  28. return nil, err
  29. }
  30. //default:fast模式,通过hash直接获取
  31. return &BlockReader{ipfsCli: ipfsClient, chunkSize: 256 * 1024, jumpReadOpt: false}, nil
  32. }
// Close releases the pooled IPFS client acquired in NewBlockReader.
func (r *BlockReader) Close() {
	r.ipfsCli.Close()
}
  36. func (r *BlockReader) SetJumpRead(fileHash string, fileSize int64, ecK int) {
  37. r.fileHash = fileHash
  38. r.fileSize = fileSize
  39. r.ecK = ecK
  40. r.jumpReadOpt = true
  41. }
// SetchunkSize overrides the default 256 KiB chunk (stripe unit) size.
// NOTE(review): the idiomatic Go name would be SetChunkSize; kept as-is to
// preserve the public interface.
func (r *BlockReader) SetchunkSize(size int64) {
	r.chunkSize = size
}
// FetchBLock opens a read stream for a single block identified by its IPFS
// hash. The caller is responsible for closing the returned reader.
// NOTE(review): "BLock" capitalization is a typo but is public interface.
func (r *BlockReader) FetchBLock(blockHash string) (io.ReadCloser, error) {
	return r.ipfsCli.OpenRead(blockHash)
}
  48. func (r *BlockReader) FetchBLocks(blockHashs []string) ([]io.ReadCloser, error) {
  49. readers := make([]io.ReadCloser, len(blockHashs))
  50. for i, hash := range blockHashs {
  51. var err error
  52. readers[i], err = r.ipfsCli.OpenRead(hash)
  53. if err != nil {
  54. return nil, err
  55. }
  56. }
  57. return readers, nil
  58. }
// JumpFetchBlock reassembles data block innerID (0-based) of the
// erasure-coded file by jump reading the original file: starting at offset
// innerID*chunkSize it reads one chunk every ecK*chunkSize bytes and
// streams the chunks through a pipe. The caller must close the returned
// reader. Requires SetJumpRead to have been called first; otherwise it
// returns (nil, nil).
func (r *BlockReader) JumpFetchBlock(innerID int) (io.ReadCloser, error) {
	if !r.jumpReadOpt {
		// NOTE(review): returns nil,nil instead of an error — callers must
		// guard against the nil reader.
		return nil, nil
	}
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		// Read chunk innerID of every stripe: one chunkSize-long read every
		// ecK*chunkSize bytes of the original file.
		for i := int64(r.chunkSize * int64(innerID)); i < r.fileSize; i += int64(r.ecK) * r.chunkSize {
			reader, err := r.ipfsCli.OpenRead(r.fileHash, ipfs.ReadOption{Offset: i, Length: r.chunkSize})
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
			data, err := ioutil.ReadAll(reader)
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
			reader.Close()
			_, err = pipeWriter.Write(data)
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}
		// If the file size is not a whole number of stripes, the final
		// partial stripe may need zero padding so the block has full length.
		if r.fileSize%(r.chunkSize*int64(r.ecK)) != 0 {
			// pktNum_1: number of complete stripes ("chunkNum-1" in the
			// original comment).
			pktNum_1 := r.fileSize / (r.chunkSize * int64(r.ecK))
			// offset: bytes of real data in the final, partial stripe.
			offset := (r.fileSize - int64(pktNum_1)*int64(r.ecK)*r.chunkSize)
			// count0: number of zero bytes this block must append.
			// NOTE(review): innerID*ecK*chunkSize looks suspicious as a
			// per-block position (innerID*chunkSize would be the usual
			// stride offset) — confirm against the EC decoder's layout.
			count0 := int64(innerID)*int64(r.ecK)*r.chunkSize - offset
			if count0 > 0 {
				add0 := make([]byte, count0)
				pipeWriter.Write(add0)
			}
		}
		pipeWriter.Close()
	}()
	return pipeReader, nil
}
// FetchBlock1 fetches one block either directly by hash or by jump reading,
// depending on jumpReadOpt.
//
// Deprecated: the original comment marks this function as abandoned; use
// FetchBLock / JumpFetchBlock instead.
func (r *BlockReader) FetchBlock1(input interface{}, errMsg chan error) (io.ReadCloser, error) {
	/* The first parameter's type differs between the two modes:
	   jumpReadOpt == true:  input is a block hash (string); the block is read
	                         directly by hash
	   jumpReadOpt == false: input is an innerID (int); selects which data
	                         block to reassemble
	   NOTE(review): this is the opposite meaning of jumpReadOpt used by
	   SetJumpRead/JumpFetchBlock — one of the two is inverted; confirm.
	*/
	var innerID int
	var blockHash string
	switch input.(type) {
	case int:
		// Integer input: only valid when jumpReadOpt is false.
		if r.jumpReadOpt {
			return nil, errors.New("conflict, wrong input type and jumpReadOpt:true")
		} else {
			innerID = input.(int)
		}
	case string:
		// String input: only valid when jumpReadOpt is true.
		if !r.jumpReadOpt {
			return nil, errors.New("conflict, wrong input type and jumpReadOpt:false")
		} else {
			blockHash = input.(string)
		}
	default:
		return nil, errors.New("wrong input type")
	}
	// Start reading.
	if r.jumpReadOpt { // fast read: fetch the block directly by hash
		ipfsCli, err := stgglb.IPFSPool.Acquire()
		if err != nil {
			logger.Warnf("new ipfs client: %s", err.Error())
			return nil, err
		}
		// NOTE(review): the deferred Close releases the client at return
		// while the returned reader may still be in use — potential bug.
		defer ipfsCli.Close()
		return ipfsCli.OpenRead(blockHash)
	} else { // jump read: reassemble the block by striding through the file
		ipfsCli, err := stgglb.IPFSPool.Acquire()
		if err != nil {
			logger.Warnf("new ipfs client: %s", err.Error())
			return nil, err
		}
		// NOTE(review): Close fires when this function returns, but the
		// goroutine below keeps using ipfsCli afterwards — potential bug.
		defer ipfsCli.Close()
		pipeReader, pipeWriter := io.Pipe()
		go func() {
			// Chunk innerID of every stripe: one chunkSize-long read every
			// ecK*chunkSize bytes of the original file.
			for i := int64(r.chunkSize * int64(innerID)); i < r.fileSize; i += int64(r.ecK) * r.chunkSize {
				reader, err := ipfsCli.OpenRead(r.fileHash, ipfs.ReadOption{i, r.chunkSize})
				if err != nil {
					pipeWriter.Close()
					errMsg <- err
					return
				}
				data, err := ioutil.ReadAll(reader)
				if err != nil {
					pipeWriter.Close()
					errMsg <- err
					return
				}
				reader.Close()
				_, err = pipeWriter.Write(data)
				if err != nil {
					pipeWriter.Close()
					errMsg <- err
					return
				}
			}
			// If the file size is not a whole number of stripes, the final
			// partial stripe may need zero padding.
			if r.fileSize%(r.chunkSize*int64(r.ecK)) != 0 {
				// pktNum_1: number of complete stripes ("chunkNum-1").
				pktNum_1 := r.fileSize / (r.chunkSize * int64(r.ecK))
				// offset: bytes of real data in the final, partial stripe.
				offset := (r.fileSize - int64(pktNum_1)*int64(r.ecK)*r.chunkSize)
				// count0: zero bytes to append; see the same NOTE(review)
				// about this formula in JumpFetchBlock.
				count0 := int64(innerID)*int64(r.ecK)*r.chunkSize - offset
				if count0 > 0 {
					add0 := make([]byte, count0)
					pipeWriter.Write(add0)
				}
			}
			pipeWriter.Close()
			errMsg <- nil
		}()
		return pipeReader, nil
	}
}

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。