You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

passes.go 7.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285
  1. package parser
  2. import (
  3. "fmt"
  4. "math"
  5. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  6. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  7. cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
  8. "gitlink.org.cn/cloudream/common/utils/math2"
  9. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
  10. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/ops2"
  11. "gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
  12. )
  13. // 计算输入流的打开范围。会把流的范围按条带大小取整
  14. func calcStreamRange(ctx *GenerateContext) {
  15. stripSize := int64(ctx.LRC.ChunkSize * ctx.LRC.K)
  16. rng := exec.Range{
  17. Offset: math.MaxInt64,
  18. }
  19. for _, to := range ctx.To {
  20. if to.GetDataIndex() == -1 {
  21. toRng := to.GetRange()
  22. rng.ExtendStart(math2.Floor(toRng.Offset, stripSize))
  23. if toRng.Length != nil {
  24. rng.ExtendEnd(math2.Ceil(toRng.Offset+*toRng.Length, stripSize))
  25. } else {
  26. rng.Length = nil
  27. }
  28. } else {
  29. toRng := to.GetRange()
  30. blkStartIndex := math2.FloorDiv(toRng.Offset, int64(ctx.LRC.ChunkSize))
  31. rng.ExtendStart(blkStartIndex * stripSize)
  32. if toRng.Length != nil {
  33. blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(ctx.LRC.ChunkSize))
  34. rng.ExtendEnd(blkEndIndex * stripSize)
  35. } else {
  36. rng.Length = nil
  37. }
  38. }
  39. }
  40. ctx.StreamRange = rng
  41. }
  42. func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, error) {
  43. var repRange exec.Range
  44. var blkRange exec.Range
  45. repRange.Offset = ctx.StreamRange.Offset
  46. blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.LRC.ChunkSize*ctx.LRC.K) * int64(ctx.LRC.ChunkSize)
  47. if ctx.StreamRange.Length != nil {
  48. repRngLen := *ctx.StreamRange.Length
  49. repRange.Length = &repRngLen
  50. blkRngLen := *ctx.StreamRange.Length / int64(ctx.LRC.ChunkSize*ctx.LRC.K) * int64(ctx.LRC.ChunkSize)
  51. blkRange.Length = &blkRngLen
  52. }
  53. switch f := f.(type) {
  54. case *ioswitchlrc.FromNode:
  55. t := ctx.DAG.NewShardRead(f.Storage.StorageID, types.NewOpen(f.FileHash))
  56. if f.DataIndex == -1 {
  57. t.Open.WithNullableLength(repRange.Offset, repRange.Length)
  58. } else {
  59. t.Open.WithNullableLength(blkRange.Offset, blkRange.Length)
  60. }
  61. t.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: f.Node, Address: *f.Node.Address.(*cdssdk.GRPCAddressInfo)})
  62. t.Env().Pinned = true
  63. return t, nil
  64. case *ioswitchlrc.FromDriver:
  65. n := ctx.DAG.NewFromDriver(f.Handle)
  66. n.Env().ToEnvDriver()
  67. n.Env().Pinned = true
  68. if f.DataIndex == -1 {
  69. f.Handle.RangeHint.Offset = repRange.Offset
  70. f.Handle.RangeHint.Length = repRange.Length
  71. } else {
  72. f.Handle.RangeHint.Offset = blkRange.Offset
  73. f.Handle.RangeHint.Length = blkRange.Length
  74. }
  75. return n, nil
  76. default:
  77. return nil, fmt.Errorf("unsupported from type %T", f)
  78. }
  79. }
  80. func buildToNode(ctx *GenerateContext, t ioswitchlrc.To) (ops2.ToNode, error) {
  81. switch t := t.(type) {
  82. case *ioswitchlrc.ToNode:
  83. n := ctx.DAG.NewShardWrite(t.FileHashStoreKey)
  84. n.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: t.Hub})
  85. n.Env().Pinned = true
  86. return n, nil
  87. case *ioswitchlrc.ToDriver:
  88. n := ctx.DAG.NewToDriver(t.Handle)
  89. n.Env().ToEnvDriver()
  90. n.Env().Pinned = true
  91. return n, nil
  92. default:
  93. return nil, fmt.Errorf("unsupported to type %T", t)
  94. }
  95. }
// pin determines each instruction's execution location from where its input
// and output streams live.
// Every To-style instruction has a fixed execution location; those locations
// spread step by step through the whole DAG via pinning, so in theory no
// instruction's location remains forever undecided.
//
// Returns true if any node's environment changed, so the caller can invoke
// this repeatedly until a fixed point is reached.
func pin(ctx *GenerateContext) bool {
	changed := false
	ctx.DAG.Walk(func(node dag.Node) bool {
		// Explicitly pinned nodes keep their environment.
		if node.Env().Pinned {
			return true
		}

		// First, try to place the node where its consumers run. toEnv ends up
		// non-nil only if the known consumers agree on a single environment.
		var toEnv *dag.NodeEnv
		for _, out := range node.OutputStreams().RawArray() {
			for _, to := range out.To().RawArray() {
				if to.Node.Env().Type == dag.EnvUnknown {
					continue
				}

				if toEnv == nil {
					toEnv = to.Node.Env()
				} else if !toEnv.Equals(to.Node.Env()) {
					// NOTE(review): this break exits only the inner loop, so a
					// later output stream can re-assign toEnv after a conflict.
					// Confirm whether a conflict should abort the whole scan.
					toEnv = nil
					break
				}
			}
		}

		if toEnv != nil {
			if !node.Env().Equals(toEnv) {
				changed = true
			}

			*node.Env() = *toEnv
			return true
		}

		// Otherwise, place the node where its input streams originate.
		var fromEnv *dag.NodeEnv
		for _, in := range node.InputStreams().RawArray() {
			if in.From().Node.Env().Type == dag.EnvUnknown {
				continue
			}

			if fromEnv == nil {
				fromEnv = in.From().Node.Env()
			} else if !fromEnv.Equals(in.From().Node.Env()) {
				fromEnv = nil
				break
			}
		}

		if fromEnv != nil {
			if !node.Env().Equals(fromEnv) {
				changed = true
			}

			*node.Env() = *fromEnv
		}
		return true
	})

	return changed
}
  149. // 对于所有未使用的流,增加Drop指令
  150. func dropUnused(ctx *GenerateContext) {
  151. ctx.DAG.Walk(func(node dag.Node) bool {
  152. for _, out := range node.OutputStreams().RawArray() {
  153. if out.To().Len() == 0 {
  154. n := ctx.DAG.NewDropStream()
  155. *n.Env() = *node.Env()
  156. n.SetInput(out)
  157. }
  158. }
  159. return true
  160. })
  161. }
  162. // 为IPFS写入指令存储结果
  163. func storeIPFSWriteResult(ctx *GenerateContext) {
  164. dag.WalkOnlyType[*ops2.ShardWriteNode](ctx.DAG.Graph, func(n *ops2.ShardWriteNode) bool {
  165. if n.FileHashStoreKey == "" {
  166. return true
  167. }
  168. storeNode := ctx.DAG.NewStore()
  169. storeNode.Env().ToEnvDriver()
  170. storeNode.Store(n.FileHashStoreKey, n.FileHashVar())
  171. return true
  172. })
  173. }
  174. // 生成Range指令。StreamRange可能超过文件总大小,但Range指令会在数据量不够时不报错而是正常返回
  175. func generateRange(ctx *GenerateContext) {
  176. for i := 0; i < len(ctx.To); i++ {
  177. to := ctx.To[i]
  178. toNode := ctx.ToNodes[to]
  179. toDataIdx := to.GetDataIndex()
  180. toRng := to.GetRange()
  181. if toDataIdx == -1 {
  182. n := ctx.DAG.NewRange()
  183. toInput := toNode.Input()
  184. *n.Env() = *toInput.Var.From().Node.Env()
  185. rnged := n.RangeStream(toInput.Var, exec.Range{
  186. Offset: toRng.Offset - ctx.StreamRange.Offset,
  187. Length: toRng.Length,
  188. })
  189. toInput.Var.Disconnect(toNode, toInput.Index)
  190. toNode.SetInput(rnged)
  191. } else {
  192. stripSize := int64(ctx.LRC.ChunkSize * ctx.LRC.K)
  193. blkStartIdx := ctx.StreamRange.Offset / stripSize
  194. blkStart := blkStartIdx * int64(ctx.LRC.ChunkSize)
  195. n := ctx.DAG.NewRange()
  196. toInput := toNode.Input()
  197. *n.Env() = *toInput.Var.From().Node.Env()
  198. rnged := n.RangeStream(toInput.Var, exec.Range{
  199. Offset: toRng.Offset - blkStart,
  200. Length: toRng.Length,
  201. })
  202. toInput.Var.Disconnect(toNode, toInput.Index)
  203. toNode.SetInput(rnged)
  204. }
  205. }
  206. }
  207. // 生成Clone指令
  208. func generateClone(ctx *GenerateContext) {
  209. ctx.DAG.Walk(func(node dag.Node) bool {
  210. for _, out := range node.OutputStreams().RawArray() {
  211. if out.To().Len() <= 1 {
  212. continue
  213. }
  214. t := ctx.DAG.NewCloneStream()
  215. *t.Env() = *node.Env()
  216. for _, to := range out.To().RawArray() {
  217. t.NewOutput().Connect(to.Node, to.SlotIndex)
  218. }
  219. out.To().Resize(0)
  220. t.SetInput(out)
  221. }
  222. for _, out := range node.OutputValues().RawArray() {
  223. if out.To().Len() <= 1 {
  224. continue
  225. }
  226. t := ctx.DAG.NewCloneValue()
  227. *t.Env() = *node.Env()
  228. for _, to := range out.To().RawArray() {
  229. t.NewOutput().Connect(to.Node, to.SlotIndex)
  230. }
  231. out.To().Resize(0)
  232. t.SetInput(out)
  233. }
  234. return true
  235. })
  236. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。