You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

shard_store.go 5.0 kB

1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200
  1. package ops2
  2. import (
  3. "fmt"
  4. "io"
  5. "gitlink.org.cn/cloudream/common/pkgs/future"
  6. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  8. "gitlink.org.cn/cloudream/common/pkgs/logger"
  9. cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
  10. "gitlink.org.cn/cloudream/common/utils/io2"
  11. "gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/pool"
  12. "gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
  13. )
  14. func init() {
  15. exec.UseOp[*ShardRead]()
  16. exec.UseOp[*ShardWrite]()
  17. exec.UseVarValue[*FileHashValue]()
  18. }
  19. type FileHashValue struct {
  20. Hash cdssdk.FileHash `json:"hash"`
  21. }
  22. func (v *FileHashValue) Clone() exec.VarValue {
  23. return &FileHashValue{Hash: v.Hash}
  24. }
  25. type ShardRead struct {
  26. Output exec.VarID `json:"output"`
  27. StorageID cdssdk.StorageID `json:"storageID"`
  28. Open types.OpenOption `json:"option"`
  29. }
  30. func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  31. logger.
  32. WithField("Open", o.Open).
  33. Debugf("reading from shard store")
  34. defer logger.Debugf("reading from shard store finished")
  35. pool, err := exec.ValueByType[*pool.ShardStorePool](ctx)
  36. if err != nil {
  37. return fmt.Errorf("getting shard store pool: %w", err)
  38. }
  39. store := pool.Get(o.StorageID)
  40. if store == nil {
  41. return fmt.Errorf("shard store %v not found", o.StorageID)
  42. }
  43. file, err := store.Open(o.Open)
  44. if err != nil {
  45. return fmt.Errorf("opening shard store file: %w", err)
  46. }
  47. fut := future.NewSetVoid()
  48. e.PutVar(o.Output, &exec.StreamValue{
  49. Stream: io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
  50. fut.SetVoid()
  51. }),
  52. })
  53. return fut.Wait(ctx.Context)
  54. }
  55. func (o *ShardRead) String() string {
  56. return fmt.Sprintf("ShardRead %v -> %v", o.Open, o.Output)
  57. }
  58. type ShardWrite struct {
  59. Input exec.VarID `json:"input"`
  60. FileHash exec.VarID `json:"fileHash"`
  61. StorageID cdssdk.StorageID `json:"storageID"`
  62. }
  63. func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  64. logger.
  65. WithField("Input", o.Input).
  66. WithField("FileHash", o.FileHash).
  67. Debugf("writting file to shard store")
  68. defer logger.Debugf("write to shard store finished")
  69. pool, err := exec.ValueByType[*pool.ShardStorePool](ctx)
  70. if err != nil {
  71. return fmt.Errorf("getting shard store pool: %w", err)
  72. }
  73. store := pool.Get(o.StorageID)
  74. if store == nil {
  75. return fmt.Errorf("shard store %v not found", o.StorageID)
  76. }
  77. input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input)
  78. if err != nil {
  79. return err
  80. }
  81. defer input.Stream.Close()
  82. writer := store.New()
  83. defer writer.Abort()
  84. _, err = io.Copy(writer, input.Stream)
  85. if err != nil {
  86. return fmt.Errorf("writing file to shard store: %w", err)
  87. }
  88. fileInfo, err := writer.Finish()
  89. if err != nil {
  90. return fmt.Errorf("finishing writing file to shard store: %w", err)
  91. }
  92. e.PutVar(o.FileHash, &FileHashValue{
  93. Hash: fileInfo.Hash,
  94. })
  95. return nil
  96. }
  97. func (o *ShardWrite) String() string {
  98. return fmt.Sprintf("ShardWrite %v -> %v", o.Input, o.FileHash)
  99. }
  100. type ShardReadNode struct {
  101. dag.NodeBase
  102. StorageID cdssdk.StorageID
  103. Open types.OpenOption
  104. }
  105. func (b *GraphNodeBuilder) NewShardRead(stgID cdssdk.StorageID, open types.OpenOption) *ShardReadNode {
  106. node := &ShardReadNode{
  107. StorageID: stgID,
  108. Open: open,
  109. }
  110. b.AddNode(node)
  111. node.OutputStreams().SetupNew(node, b.NewVar())
  112. return node
  113. }
  114. func (t *ShardReadNode) Output() dag.Slot {
  115. return dag.Slot{
  116. Var: t.OutputStreams().Get(0),
  117. Index: 0,
  118. }
  119. }
  120. func (t *ShardReadNode) GenerateOp() (exec.Op, error) {
  121. return &ShardRead{
  122. Output: t.OutputStreams().Get(0).VarID,
  123. StorageID: t.StorageID,
  124. Open: t.Open,
  125. }, nil
  126. }
  127. // func (t *IPFSReadType) String() string {
  128. // return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
  129. // }
  130. type ShardWriteNode struct {
  131. dag.NodeBase
  132. StorageID cdssdk.StorageID
  133. FileHashStoreKey string
  134. }
  135. func (b *GraphNodeBuilder) NewShardWrite(stgID cdssdk.StorageID, fileHashStoreKey string) *ShardWriteNode {
  136. node := &ShardWriteNode{
  137. FileHashStoreKey: fileHashStoreKey,
  138. }
  139. b.AddNode(node)
  140. return node
  141. }
  142. func (t *ShardWriteNode) SetInput(input *dag.Var) {
  143. t.InputStreams().EnsureSize(1)
  144. input.Connect(t, 0)
  145. t.OutputValues().SetupNew(t, t.Graph().NewVar())
  146. }
  147. func (t *ShardWriteNode) Input() dag.Slot {
  148. return dag.Slot{
  149. Var: t.InputStreams().Get(0),
  150. Index: 0,
  151. }
  152. }
  153. func (t *ShardWriteNode) FileHashVar() *dag.Var {
  154. return t.OutputValues().Get(0)
  155. }
  156. func (t *ShardWriteNode) GenerateOp() (exec.Op, error) {
  157. return &ShardWrite{
  158. Input: t.InputStreams().Get(0).VarID,
  159. FileHash: t.OutputValues().Get(0).VarID,
  160. StorageID: t.StorageID,
  161. }, nil
  162. }
  163. // func (t *IPFSWriteType) String() string {
  164. // return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
  165. // }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。