You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

chunked.go 4.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182
  1. package ops2
  2. import (
  3. "fmt"
  4. "io"
  5. "github.com/samber/lo"
  6. "gitlink.org.cn/cloudream/common/pkgs/future"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  8. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  9. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils"
  10. "gitlink.org.cn/cloudream/common/utils/io2"
  11. "golang.org/x/sync/semaphore"
  12. )
// init registers the chunked split/join ops with the exec framework so
// they can be instantiated by name when a plan is deserialized.
func init() {
	exec.UseOp[*ChunkedSplit]()
	exec.UseOp[*ChunkedJoin]()
}
// ChunkedSplit is an executable op that splits one input stream into
// len(Outputs) sub-streams, dealing out ChunkSize-sized chunks in turn
// (see io2.ChunkedSplit).
type ChunkedSplit struct {
	Input        *exec.StreamVar   `json:"input"`        // source stream
	Outputs      []*exec.StreamVar `json:"outputs"`      // one destination stream per split lane
	ChunkSize    int               `json:"chunkSize"`    // chunk size passed to io2.ChunkedSplit (presumably bytes — confirm against io2)
	PaddingZeros bool              `json:"paddingZeros"` // forwarded as io2.ChunkedSplitOption.PaddingZeros
}
  23. func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  24. err := e.BindVars(ctx.Context, o.Input)
  25. if err != nil {
  26. return err
  27. }
  28. defer o.Input.Stream.Close()
  29. outputs := io2.ChunkedSplit(o.Input.Stream, o.ChunkSize, len(o.Outputs), io2.ChunkedSplitOption{
  30. PaddingZeros: o.PaddingZeros,
  31. })
  32. sem := semaphore.NewWeighted(int64(len(outputs)))
  33. for i := range outputs {
  34. sem.Acquire(ctx.Context, 1)
  35. o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
  36. sem.Release(1)
  37. })
  38. }
  39. exec.PutArrayVars(e, o.Outputs)
  40. return sem.Acquire(ctx.Context, int64(len(outputs)))
  41. }
  42. func (o *ChunkedSplit) String() string {
  43. return fmt.Sprintf(
  44. "ChunkedSplit(chunkSize=%v, paddingZeros=%v), %v -> (%v)",
  45. o.ChunkSize,
  46. o.PaddingZeros,
  47. o.Input.ID,
  48. utils.FormatVarIDs(o.Outputs),
  49. )
  50. }
// ChunkedJoin is an executable op that interleaves ChunkSize-sized
// chunks from each input stream back into a single output stream — the
// inverse of ChunkedSplit (see io2.BufferedChunkedJoin).
type ChunkedJoin struct {
	Inputs    []*exec.StreamVar `json:"inputs"`    // source streams, read round-robin in slice order
	Output    *exec.StreamVar   `json:"output"`    // joined destination stream
	ChunkSize int               `json:"chunkSize"` // chunk size passed to io2.BufferedChunkedJoin
}
  56. func (o *ChunkedJoin) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  57. err := exec.BindArrayVars(e, ctx.Context, o.Inputs)
  58. if err != nil {
  59. return err
  60. }
  61. var strReaders []io.Reader
  62. for _, s := range o.Inputs {
  63. strReaders = append(strReaders, s.Stream)
  64. }
  65. defer func() {
  66. for _, str := range o.Inputs {
  67. str.Stream.Close()
  68. }
  69. }()
  70. fut := future.NewSetVoid()
  71. o.Output.Stream = io2.AfterReadClosedOnce(io2.BufferedChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) {
  72. fut.SetVoid()
  73. })
  74. e.PutVars(o.Output)
  75. return fut.Wait(ctx.Context)
  76. }
  77. func (o *ChunkedJoin) String() string {
  78. return fmt.Sprintf(
  79. "ChunkedJoin(chunkSize=%v), (%v) -> %v",
  80. o.ChunkSize,
  81. utils.FormatVarIDs(o.Inputs),
  82. o.Output.ID,
  83. )
  84. }
// ChunkedSplitNode is the plan-graph node that generates a ChunkedSplit
// op: one input stream, N output streams (see Split).
type ChunkedSplitNode struct {
	dag.NodeBase
	ChunkSize int // chunk size for the generated ChunkedSplit op
}
  89. func (b *GraphNodeBuilder) NewChunkedSplit(chunkSize int) *ChunkedSplitNode {
  90. node := &ChunkedSplitNode{
  91. ChunkSize: chunkSize,
  92. }
  93. b.AddNode(node)
  94. return node
  95. }
  96. func (t *ChunkedSplitNode) Split(input *dag.StreamVar, cnt int) {
  97. t.InputStreams().EnsureSize(1)
  98. input.Connect(t, 0)
  99. t.OutputStreams().Resize(cnt)
  100. for i := 0; i < cnt; i++ {
  101. t.OutputStreams().Setup(t, t.Graph().NewStreamVar(), i)
  102. }
  103. }
// SubStream returns the idx-th output stream variable created by Split.
func (t *ChunkedSplitNode) SubStream(idx int) *dag.StreamVar {
	return t.OutputStreams().Get(idx)
}
// SplitCount reports how many output streams this node splits into.
func (t *ChunkedSplitNode) SplitCount() int {
	return t.OutputStreams().Len()
}
  110. func (t *ChunkedSplitNode) GenerateOp() (exec.Op, error) {
  111. return &ChunkedSplit{
  112. Input: t.InputStreams().Get(0).Var,
  113. Outputs: lo.Map(t.OutputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
  114. return v.Var
  115. }),
  116. ChunkSize: t.ChunkSize,
  117. PaddingZeros: true,
  118. }, nil
  119. }
  120. // func (t *ChunkedSplitNode) String() string {
  121. // return fmt.Sprintf("ChunkedSplit[%v]%v%v", t.ChunkSize, formatStreamIO(node), formatValueIO(node))
  122. // }
// ChunkedJoinNode is the plan-graph node that generates a ChunkedJoin
// op: N input streams (see AddInput), one output stream (see Joined).
type ChunkedJoinNode struct {
	dag.NodeBase
	ChunkSize int // chunk size for the generated ChunkedJoin op
}
  127. func (b *GraphNodeBuilder) NewChunkedJoin(chunkSize int) *ChunkedJoinNode {
  128. node := &ChunkedJoinNode{
  129. ChunkSize: chunkSize,
  130. }
  131. b.AddNode(node)
  132. node.OutputStreams().SetupNew(node, b.Graph.NewStreamVar())
  133. return node
  134. }
  135. func (t *ChunkedJoinNode) AddInput(str *dag.StreamVar) {
  136. idx := t.InputStreams().EnlargeOne()
  137. str.Connect(t, idx)
  138. }
// Joined returns the node's single output stream variable (created in
// NewChunkedJoin).
func (t *ChunkedJoinNode) Joined() *dag.StreamVar {
	return t.OutputStreams().Get(0)
}
  142. func (t *ChunkedJoinNode) GenerateOp() (exec.Op, error) {
  143. return &ChunkedJoin{
  144. Inputs: lo.Map(t.InputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
  145. return v.Var
  146. }),
  147. Output: t.OutputStreams().Get(0).Var,
  148. ChunkSize: t.ChunkSize,
  149. }, nil
  150. }
  151. // func (t *ChunkedJoinNode) String() string {
  152. // return fmt.Sprintf("ChunkedJoin[%v]%v%v", t.ChunkSize, formatStreamIO(node), formatValueIO(node))
  153. // }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。