You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ipfs.go 4.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170
  1. package ops2
  2. import (
  3. "context"
  4. "fmt"
  5. "io"
  6. "gitlink.org.cn/cloudream/common/pkgs/future"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  8. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  9. "gitlink.org.cn/cloudream/common/pkgs/ipfs"
  10. "gitlink.org.cn/cloudream/common/pkgs/logger"
  11. "gitlink.org.cn/cloudream/common/utils/io2"
  12. stgglb "gitlink.org.cn/cloudream/storage/common/globals"
  13. )
// init registers both IPFS ops with the exec framework so that
// serialized execution plans referencing them can be decoded and run.
func init() {
	exec.UseOp[*IPFSRead]()
	exec.UseOp[*IPFSWrite]()
}
// IPFSRead is an op that reads a file from IPFS by its hash and
// publishes the file content as an output stream variable.
type IPFSRead struct {
	Output   *exec.StreamVar `json:"output"`   // stream var that receives the file content
	FileHash string          `json:"fileHash"` // IPFS hash of the file to read
	Option   ipfs.ReadOption `json:"option"`   // read options passed through to the IPFS client
}
  23. func (o *IPFSRead) Execute(ctx context.Context, e *exec.Executor) error {
  24. logger.
  25. WithField("FileHash", o.FileHash).
  26. Debugf("ipfs read op")
  27. defer logger.Debugf("ipfs read op finished")
  28. ipfsCli, err := stgglb.IPFSPool.Acquire()
  29. if err != nil {
  30. return fmt.Errorf("new ipfs client: %w", err)
  31. }
  32. defer stgglb.IPFSPool.Release(ipfsCli)
  33. file, err := ipfsCli.OpenRead(o.FileHash, o.Option)
  34. if err != nil {
  35. return fmt.Errorf("reading ipfs: %w", err)
  36. }
  37. defer file.Close()
  38. fut := future.NewSetVoid()
  39. o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
  40. fut.SetVoid()
  41. })
  42. e.PutVars(o.Output)
  43. return fut.Wait(ctx)
  44. }
  45. func (o *IPFSRead) String() string {
  46. return fmt.Sprintf("IPFSRead %v -> %v", o.FileHash, o.Output.ID)
  47. }
// IPFSWrite is an op that stores an input stream as a new IPFS file and
// publishes the resulting file hash through a string variable.
type IPFSWrite struct {
	Input    *exec.StreamVar `json:"input"`    // stream var providing the file content
	FileHash *exec.StringVar `json:"fileHash"` // string var that receives the created file's hash
}
  52. func (o *IPFSWrite) Execute(ctx context.Context, e *exec.Executor) error {
  53. logger.
  54. WithField("Input", o.Input.ID).
  55. WithField("FileHashVar", o.FileHash.ID).
  56. Debugf("ipfs write op")
  57. ipfsCli, err := stgglb.IPFSPool.Acquire()
  58. if err != nil {
  59. return fmt.Errorf("new ipfs client: %w", err)
  60. }
  61. defer stgglb.IPFSPool.Release(ipfsCli)
  62. err = e.BindVars(ctx, o.Input)
  63. if err != nil {
  64. return err
  65. }
  66. defer o.Input.Stream.Close()
  67. o.FileHash.Value, err = ipfsCli.CreateFile(o.Input.Stream)
  68. if err != nil {
  69. return fmt.Errorf("creating ipfs file: %w", err)
  70. }
  71. e.PutVars(o.FileHash)
  72. return nil
  73. }
  74. func (o *IPFSWrite) String() string {
  75. return fmt.Sprintf("IPFSWrite %v -> %v", o.Input.ID, o.FileHash.ID)
  76. }
// IPFSReadNode is the DAG node counterpart of the IPFSRead op: it
// represents reading one IPFS file and exposes a single output stream.
type IPFSReadNode struct {
	dag.NodeBase
	FileHash string          // IPFS hash of the file to read
	Option   ipfs.ReadOption // read options forwarded to the generated op
}
  82. func (b *GraphNodeBuilder) NewIPFSRead(fileHash string, option ipfs.ReadOption) *IPFSReadNode {
  83. node := &IPFSReadNode{
  84. FileHash: fileHash,
  85. Option: option,
  86. }
  87. b.AddNode(node)
  88. node.OutputStreams().SetupNew(node, b.NewStreamVar())
  89. return node
  90. }
  91. func (t *IPFSReadNode) Output() dag.StreamSlot {
  92. return dag.StreamSlot{
  93. Var: t.OutputStreams().Get(0),
  94. Index: 0,
  95. }
  96. }
  97. func (t *IPFSReadNode) GenerateOp() (exec.Op, error) {
  98. return &IPFSRead{
  99. Output: t.OutputStreams().Get(0).Var,
  100. FileHash: t.FileHash,
  101. Option: t.Option,
  102. }, nil
  103. }
  104. // func (t *IPFSReadType) String() string {
  105. // return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
  106. // }
// IPFSWriteNode is the DAG node counterpart of the IPFSWrite op: it
// represents storing one stream into IPFS and exposes the file hash
// as a string-valued output.
type IPFSWriteNode struct {
	dag.NodeBase
	FileHashStoreKey string // key under which the produced file hash is stored
}
  111. func (b *GraphNodeBuilder) NewIPFSWrite(fileHashStoreKey string) *IPFSWriteNode {
  112. node := &IPFSWriteNode{
  113. FileHashStoreKey: fileHashStoreKey,
  114. }
  115. b.AddNode(node)
  116. return node
  117. }
// SetInput connects the given stream var as this node's only input
// (slot 0) and creates the string-valued output var that will carry
// the file hash.
func (t *IPFSWriteNode) SetInput(input *dag.StreamVar) {
	// Make sure slot 0 exists before connecting into it.
	t.InputStreams().EnsureSize(1)
	input.Connect(t, 0)
	t.OutputValues().SetupNew(t, t.Graph().NewValueVar(dag.StringValueVar))
}
  123. func (t *IPFSWriteNode) Input() dag.StreamSlot {
  124. return dag.StreamSlot{
  125. Var: t.InputStreams().Get(0),
  126. Index: 0,
  127. }
  128. }
  129. func (t *IPFSWriteNode) FileHashVar() *dag.ValueVar {
  130. return t.OutputValues().Get(0)
  131. }
  132. func (t *IPFSWriteNode) GenerateOp() (exec.Op, error) {
  133. return &IPFSWrite{
  134. Input: t.InputStreams().Get(0).Var,
  135. FileHash: t.OutputValues().Get(0).Var.(*exec.StringVar),
  136. }, nil
  137. }
  138. // func (t *IPFSWriteType) String() string {
  139. // return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
  140. // }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。