You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

chunked.go 4.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203
  1. package ops2
  2. import (
  3. "fmt"
  4. "io"
  5. "github.com/samber/lo"
  6. "gitlink.org.cn/cloudream/common/pkgs/future"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  8. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  9. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils"
  10. "gitlink.org.cn/cloudream/common/utils/io2"
  11. "golang.org/x/sync/semaphore"
  12. )
// init registers the ops defined in this file with the executor's op
// registry so they can be deserialized and executed by name.
func init() {
	exec.UseOp[*ChunkedSplit]()
	exec.UseOp[*ChunkedJoin]()
}
// ChunkedSplit is an op that splits one input stream into len(Outputs)
// sub-streams, distributing fixed-size chunks of ChunkSize bytes round-robin
// across the outputs (see io2.ChunkedSplit).
type ChunkedSplit struct {
	Input        *exec.StreamVar   `json:"input"`        // the stream to split
	Outputs      []*exec.StreamVar `json:"outputs"`      // receives the split sub-streams
	ChunkSize    int               `json:"chunkSize"`    // chunk size in bytes
	PaddingZeros bool              `json:"paddingZeros"` // pad the tail with zeros so all outputs are equal length
}
  23. func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  24. err := e.BindVars(ctx.Context, o.Input)
  25. if err != nil {
  26. return err
  27. }
  28. defer o.Input.Stream.Close()
  29. outputs := io2.ChunkedSplit(o.Input.Stream, o.ChunkSize, len(o.Outputs), io2.ChunkedSplitOption{
  30. PaddingZeros: o.PaddingZeros,
  31. })
  32. sem := semaphore.NewWeighted(int64(len(outputs)))
  33. for i := range outputs {
  34. sem.Acquire(ctx.Context, 1)
  35. o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
  36. sem.Release(1)
  37. })
  38. }
  39. exec.PutArrayVars(e, o.Outputs)
  40. return sem.Acquire(ctx.Context, int64(len(outputs)))
  41. }
  42. func (o *ChunkedSplit) String() string {
  43. return fmt.Sprintf(
  44. "ChunkedSplit(chunkSize=%v, paddingZeros=%v), %v -> (%v)",
  45. o.ChunkSize,
  46. o.PaddingZeros,
  47. o.Input.ID,
  48. utils.FormatVarIDs(o.Outputs),
  49. )
  50. }
// ChunkedJoin is the inverse of ChunkedSplit: it interleaves fixed-size
// chunks read from each input stream back into a single output stream
// (see io2.BufferedChunkedJoin).
type ChunkedJoin struct {
	Inputs    []*exec.StreamVar `json:"inputs"`    // the streams to join, in chunk order
	Output    *exec.StreamVar   `json:"output"`    // receives the joined stream
	ChunkSize int               `json:"chunkSize"` // chunk size in bytes
}
  56. func (o *ChunkedJoin) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  57. err := exec.BindArrayVars(e, ctx.Context, o.Inputs)
  58. if err != nil {
  59. return err
  60. }
  61. var strReaders []io.Reader
  62. for _, s := range o.Inputs {
  63. strReaders = append(strReaders, s.Stream)
  64. }
  65. defer func() {
  66. for _, str := range o.Inputs {
  67. str.Stream.Close()
  68. }
  69. }()
  70. fut := future.NewSetVoid()
  71. o.Output.Stream = io2.AfterReadClosedOnce(io2.BufferedChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) {
  72. fut.SetVoid()
  73. })
  74. e.PutVars(o.Output)
  75. return fut.Wait(ctx.Context)
  76. }
  77. func (o *ChunkedJoin) String() string {
  78. return fmt.Sprintf(
  79. "ChunkedJoin(chunkSize=%v), (%v) -> %v",
  80. o.ChunkSize,
  81. utils.FormatVarIDs(o.Inputs),
  82. o.Output.ID,
  83. )
  84. }
// ChunkedSplitNode is the DAG-plan node that generates a ChunkedSplit op:
// one input stream in, a configurable number of output streams out.
type ChunkedSplitNode struct {
	dag.NodeBase
	ChunkSize int // chunk size in bytes, forwarded to the generated op
}
  89. func (b *GraphNodeBuilder) NewChunkedSplit(chunkSize int) *ChunkedSplitNode {
  90. node := &ChunkedSplitNode{
  91. ChunkSize: chunkSize,
  92. }
  93. b.AddNode(node)
  94. return node
  95. }
// Split wires the node: connects input as the single input stream and
// creates cnt fresh output stream variables, one per sub-stream.
func (t *ChunkedSplitNode) Split(input *dag.StreamVar, cnt int) {
	t.InputStreams().EnsureSize(1)
	input.Connect(t, 0)
	// Resize first so every slot exists, then fill each with a new stream var.
	t.OutputStreams().Resize(cnt)
	for i := 0; i < cnt; i++ {
		t.OutputStreams().Setup(t, t.Graph().NewStreamVar(), i)
	}
}
// SubStream returns the idx-th output (sub-)stream variable of the split.
func (t *ChunkedSplitNode) SubStream(idx int) *dag.StreamVar {
	return t.OutputStreams().Get(idx)
}
// SplitCount returns the number of sub-streams this node splits into.
func (t *ChunkedSplitNode) SplitCount() int {
	return t.OutputStreams().Len()
}
// Clear detaches the node from the graph's stream wiring: it disconnects the
// input stream and all output streams, then shrinks both collections to zero.
// A node that was never wired (no input) is left untouched.
func (t *ChunkedSplitNode) Clear() {
	if t.InputStreams().Len() == 0 {
		return
	}
	// Disconnect before resizing so the peer vars drop their references first.
	t.InputStreams().Get(0).Disconnect(t, 0)
	t.InputStreams().Resize(0)
	for _, out := range t.OutputStreams().RawArray() {
		out.DisconnectAll()
	}
	t.OutputStreams().Resize(0)
}
  121. func (t *ChunkedSplitNode) GenerateOp() (exec.Op, error) {
  122. return &ChunkedSplit{
  123. Input: t.InputStreams().Get(0).Var,
  124. Outputs: lo.Map(t.OutputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
  125. return v.Var
  126. }),
  127. ChunkSize: t.ChunkSize,
  128. PaddingZeros: true,
  129. }, nil
  130. }
  131. // func (t *ChunkedSplitNode) String() string {
  132. // return fmt.Sprintf("ChunkedSplit[%v]%v%v", t.ChunkSize, formatStreamIO(node), formatValueIO(node))
  133. // }
// ChunkedJoinNode is the DAG-plan node that generates a ChunkedJoin op:
// any number of input streams in, one joined output stream out.
type ChunkedJoinNode struct {
	dag.NodeBase
	ChunkSize int // chunk size in bytes, forwarded to the generated op
}
  138. func (b *GraphNodeBuilder) NewChunkedJoin(chunkSize int) *ChunkedJoinNode {
  139. node := &ChunkedJoinNode{
  140. ChunkSize: chunkSize,
  141. }
  142. b.AddNode(node)
  143. node.OutputStreams().SetupNew(node, b.Graph.NewStreamVar())
  144. return node
  145. }
  146. func (t *ChunkedJoinNode) AddInput(str *dag.StreamVar) {
  147. idx := t.InputStreams().EnlargeOne()
  148. str.Connect(t, idx)
  149. }
// Joined returns the node's single output stream variable (the joined stream).
func (t *ChunkedJoinNode) Joined() *dag.StreamVar {
	return t.OutputStreams().Get(0)
}
  153. func (t *ChunkedJoinNode) RemoveAllInputs() {
  154. for i, in := range t.InputStreams().RawArray() {
  155. in.Disconnect(t, i)
  156. }
  157. t.InputStreams().Resize(0)
  158. }
  159. func (t *ChunkedJoinNode) GenerateOp() (exec.Op, error) {
  160. return &ChunkedJoin{
  161. Inputs: lo.Map(t.InputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
  162. return v.Var
  163. }),
  164. Output: t.OutputStreams().Get(0).Var,
  165. ChunkSize: t.ChunkSize,
  166. }, nil
  167. }
  168. // func (t *ChunkedJoinNode) String() string {
  169. // return fmt.Sprintf("ChunkedJoin[%v]%v%v", t.ChunkSize, formatStreamIO(t), formatValueIO(t))
  170. // }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。