You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

ec.go 4.7 kB

1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208
  1. package ops2
  2. import (
  3. "fmt"
  4. "io"
  5. "gitlink.org.cn/cloudream/common/pkgs/future"
  6. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  8. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils"
  9. cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
  10. "gitlink.org.cn/cloudream/common/utils/io2"
  11. "gitlink.org.cn/cloudream/common/utils/math2"
  12. "gitlink.org.cn/cloudream/common/utils/sync2"
  13. "gitlink.org.cn/cloudream/storage/common/pkgs/ec"
  14. )
// init registers the ECMultiply op type with the exec framework so that
// serialized plans containing it can be decoded and executed.
func init() {
	exec.UseOp[*ECMultiply]()
}
// ECMultiply is an executable op that multiplies a set of input byte streams
// by the coefficient matrix Coef over a Galois field, producing one output
// stream per matrix row — the core transform for erasure-coding style
// encode/decode operations.
type ECMultiply struct {
	Coef      [][]byte     `json:"coef"`      // coefficient matrix; one row per output stream
	Inputs    []exec.VarID `json:"inputs"`    // variable IDs of the input streams
	Outputs   []exec.VarID `json:"outputs"`   // variable IDs of the output streams
	ChunkSize int          `json:"chunkSize"` // bytes consumed from every input per multiplication round
}
// Execute binds the input streams and runs a two-goroutine pipeline:
// a compute goroutine reads one chunk from every input in parallel and
// multiplies it by Coef, while a writer goroutine drains the products into
// per-output pipes. The output stream values are published before waiting,
// so downstream ops can consume results while computation is still running.
// Returns the first error hit by either goroutine, or nil on clean EOF.
func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
	inputs, err := exec.BindArray[*exec.StreamValue](e, ctx.Context, o.Inputs)
	if err != nil {
		return err
	}
	// This op owns the bound input streams; close them all on exit.
	defer func() {
		for _, s := range inputs {
			s.Stream.Close()
		}
	}()
	// One pipe per output: downstream reads rd, the writer goroutine fills wr.
	outputWrs := make([]*io.PipeWriter, len(o.Outputs))
	outputVars := make([]*exec.StreamValue, len(o.Outputs))
	for i := range o.Outputs {
		rd, wr := io.Pipe()
		outputVars[i] = &exec.StreamValue{Stream: rd}
		outputWrs[i] = wr
	}
	// Per-input scratch buffers, capped at 64KiB so a large ChunkSize does
	// not force equally large allocations.
	inputChunks := make([][]byte, len(o.Inputs))
	for i := range o.Inputs {
		inputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024))
	}
	// Two output buffer sets rotate through the pool (double buffering), so
	// computing the next chunk can overlap with writing the previous one.
	outputBufPool := sync2.NewBucketPool[[][]byte]()
	for i := 0; i < 2; i++ {
		outputChunks := make([][]byte, len(o.Outputs))
		for i := range o.Outputs {
			outputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024))
		}
		outputBufPool.PutEmpty(outputChunks)
	}
	fut := future.NewSetVoid()
	// Compute goroutine: read -> multiply -> hand filled buffers to the writer.
	go func() {
		mul := ec.GaloisMultiplier().BuildGalois()
		// Wake the writer goroutine so it does not block forever if we exit.
		defer outputBufPool.WakeUpAll()
		// Split ChunkSize into <=64KiB pieces matching the buffer sizes;
		// cycle through the piece lengths chunk after chunk.
		readLens := math2.SplitLessThan(o.ChunkSize, 64*1024)
		readLenIdx := 0
		for {
			curReadLen := readLens[readLenIdx]
			for i := range inputChunks {
				inputChunks[i] = inputChunks[i][:curReadLen]
			}
			// Fill one chunk from every input concurrently.
			err := sync2.ParallelDo(inputs, func(s *exec.StreamValue, i int) error {
				_, err := io.ReadFull(s.Stream, inputChunks[i])
				return err
			})
			// A clean EOF (zero bytes read) means all input data was consumed.
			// NOTE(review): a partial final read would surface as
			// io.ErrUnexpectedEOF and be treated as a failure below —
			// presumably inputs are always a whole number of chunks; confirm.
			if err == io.EOF {
				fut.SetVoid()
				return
			}
			if err != nil {
				fut.SetError(err)
				return
			}
			// !ok means the pool was woken up: the writer goroutine exited.
			outputBuf, ok := outputBufPool.GetEmpty()
			if !ok {
				return
			}
			// Trim the output buffers to match this round's read length.
			for i := range outputBuf {
				outputBuf[i] = outputBuf[i][:curReadLen]
			}
			err = mul.Multiply(o.Coef, inputChunks, outputBuf)
			if err != nil {
				fut.SetError(err)
				return
			}
			outputBufPool.PutFilled(outputBuf)
			readLenIdx = (readLenIdx + 1) % len(readLens)
		}
	}()
	// Writer goroutine: drain filled buffers into the output pipes, then
	// recycle them back into the pool as empties.
	go func() {
		defer outputBufPool.WakeUpAll()
		for {
			outputChunks, ok := outputBufPool.GetFilled()
			if !ok {
				return
			}
			for i := range o.Outputs {
				err := io2.WriteAll(outputWrs[i], outputChunks[i])
				if err != nil {
					fut.SetError(err)
					return
				}
			}
			outputBufPool.PutEmpty(outputChunks)
		}
	}()
	// Publish the outputs before waiting so consumers can read concurrently.
	exec.PutArray(e, o.Outputs, outputVars)
	err = fut.Wait(ctx.Context)
	if err != nil {
		// Propagate the failure to every downstream reader of the pipes.
		for _, wr := range outputWrs {
			wr.CloseWithError(err)
		}
		return err
	}
	for _, wr := range outputWrs {
		wr.Close()
	}
	return nil
}
  123. func (o *ECMultiply) String() string {
  124. return fmt.Sprintf(
  125. "ECMultiply(coef=%v) (%v) -> (%v)",
  126. o.Coef,
  127. utils.FormatVarIDs(o.Inputs),
  128. utils.FormatVarIDs(o.Outputs),
  129. )
  130. }
// ECMultiplyNode is the DAG-planning counterpart of the ECMultiply op.
// It records the erasure-coding parameters and which data indexes the
// connected input/output streams correspond to, and generates an
// ECMultiply op from them (see GenerateOp).
type ECMultiplyNode struct {
	dag.NodeBase
	EC            cdssdk.ECRedundancy // erasure-coding parameters (K, N, ChunkSize)
	InputIndexes  []int               // data index of each connected input stream
	OutputIndexes []int               // data index of each created output stream
}
  137. func (b *GraphNodeBuilder) NewECMultiply(ec cdssdk.ECRedundancy) *ECMultiplyNode {
  138. node := &ECMultiplyNode{
  139. EC: ec,
  140. }
  141. b.AddNode(node)
  142. return node
  143. }
  144. func (t *ECMultiplyNode) AddInput(str *dag.StreamVar, dataIndex int) {
  145. t.InputIndexes = append(t.InputIndexes, dataIndex)
  146. idx := t.InputStreams().EnlargeOne()
  147. str.To(t, idx)
  148. }
// RemoveAllInputs disconnects every input stream from this node, shrinks the
// input slot list to zero, and clears the recorded data indexes, returning
// the node to a no-input state.
func (t *ECMultiplyNode) RemoveAllInputs() {
	t.InputStreams().ClearAllInput(t)
	t.InputStreams().Slots.Resize(0)
	t.InputIndexes = nil
}
  154. func (t *ECMultiplyNode) NewOutput(dataIndex int) *dag.StreamVar {
  155. t.OutputIndexes = append(t.OutputIndexes, dataIndex)
  156. return t.OutputStreams().AppendNew(t).Var()
  157. }
  158. func (t *ECMultiplyNode) GenerateOp() (exec.Op, error) {
  159. rs, err := ec.NewRs(t.EC.K, t.EC.N)
  160. if err != nil {
  161. return nil, err
  162. }
  163. coef, err := rs.GenerateMatrix(t.InputIndexes, t.OutputIndexes)
  164. if err != nil {
  165. return nil, err
  166. }
  167. return &ECMultiply{
  168. Coef: coef,
  169. Inputs: t.InputStreams().GetVarIDs(),
  170. Outputs: t.OutputStreams().GetVarIDs(),
  171. ChunkSize: t.EC.ChunkSize,
  172. }, nil
  173. }
  174. // func (t *MultiplyType) String() string {
  175. // return fmt.Sprintf("Multiply[]%v%v", formatStreamIO(node), formatValueIO(node))
  176. // }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。