You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

command_service.go 9.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271
  1. package main
  2. import (
  3. "fmt"
  4. log "github.com/sirupsen/logrus"
  5. mydb "gitlink.org.cn/cloudream/db"
  6. ramsg "gitlink.org.cn/cloudream/rabbitmq/message"
  7. "gitlink.org.cn/cloudream/utils"
  8. "gitlink.org.cn/cloudream/utils/consts/errorcode"
  9. )
// CommandService implements the coordinator's command handlers (move, read,
// replica/EC write, hash registration, agent status). Its only state is the
// shared database handle used by every handler.
type CommandService struct {
db *mydb.DB
}
  13. func NewCommandService(db *mydb.DB) *CommandService {
  14. return &CommandService{
  15. db: db,
  16. }
  17. }
  18. // TODO 需要考虑数据库操作的事务性
  19. func (service *CommandService) Move(msg *ramsg.MoveCommand) ramsg.MoveResp {
  20. //查询数据库,获取冗余类型,冗余参数
  21. //jh:使用command中的bucketname和objectname查询对象表,获得redundancy,EcName,fileSizeInBytes
  22. //-若redundancy是rep,查询对象副本表, 获得repHash
  23. //--ids :={0}
  24. //--hashs := {repHash}
  25. //-若redundancy是ec,查询对象编码块表,获得blockHashs, ids(innerID),
  26. //--查询缓存表,获得每个hash的nodeIps、TempOrPins、Times
  27. //--查询节点延迟表,得到command.destination与各个nodeIps的的延迟,存到一个map类型中(Delay)
  28. //--kx:根据查出来的hash/hashs、nodeIps、TempOrPins、Times(移动/读取策略)、Delay确定hashs、ids
  29. bucketID, err := service.db.QueryBucketID(msg.BucketName)
  30. if err != nil {
  31. log.Warn("query BucketID failed, err: %s", err.Error())
  32. return ramsg.NewCoorMoveRespFailed(errorcode.OPERATION_FAILED, "query BucketID failed")
  33. }
  34. //jh:使用command中的bucketid和objectname查询对象表,获得objectid,redundancy,EcName,fileSizeInBytes
  35. object, err := service.db.QueryObject(msg.ObjectName, bucketID)
  36. if err != nil {
  37. log.Warn("query Object failed, err: %s", err.Error())
  38. return ramsg.NewCoorMoveRespFailed(errorcode.OPERATION_FAILED, "query Object failed")
  39. }
  40. //-若redundancy是rep,查询对象副本表, 获得repHash
  41. var hashs []string
  42. ids := []int{0}
  43. redundancy := "rep"
  44. if object.Redundancy { //rep
  45. objectRep, err := service.db.QueryObjectRep(object.ObjectID)
  46. if err != nil {
  47. log.Warn("query ObjectRep failed, err: %s", err.Error())
  48. return ramsg.NewCoorMoveRespFailed(errorcode.OPERATION_FAILED, "query ObjectRep failed")
  49. }
  50. hashs = append(hashs, objectRep.RepHash)
  51. } else { //ec
  52. redundancy = "ec"
  53. blockHashs, err := service.db.QueryObjectBlock(object.ObjectID)
  54. if err != nil {
  55. log.Warn("query ObjectBlock failed, err: %s", err.Error())
  56. return ramsg.NewCoorMoveRespFailed(errorcode.OPERATION_FAILED, "query ObjectBlock failed")
  57. }
  58. ecPolicies := *utils.GetEcPolicy()
  59. ecPolicy := ecPolicies[object.ECName]
  60. ecN := ecPolicy.GetN()
  61. ecK := ecPolicy.GetK()
  62. ids = make([]int, ecK)
  63. for i := 0; i < ecN; i++ {
  64. hashs = append(hashs, "-1")
  65. }
  66. for i := 0; i < ecK; i++ {
  67. ids[i] = i
  68. }
  69. hashs = make([]string, ecN)
  70. for _, tt := range blockHashs {
  71. id := tt.InnerID
  72. hash := tt.BlockHash
  73. hashs[id] = hash
  74. }
  75. //--查询缓存表,获得每个hash的nodeIps、TempOrPins、Times
  76. /*for id,hash := range blockHashs{
  77. //type Cache struct {NodeIP string,TempOrPin bool,Cachetime string}
  78. Cache := Query_Cache(hash)
  79. //利用Time_trans()函数可将Cache[i].Cachetime转化为时间戳格式
  80. //--查询节点延迟表,得到command.Destination与各个nodeIps的延迟,存到一个map类型中(Delay)
  81. Delay := make(map[string]int) // 延迟集合
  82. for i:=0; i<len(Cache); i++{
  83. Delay[Cache[i].NodeIP] = Query_NodeDelay(Destination, Cache[i].NodeIP)
  84. }
  85. //--kx:根据查出来的hash/hashs、nodeIps、TempOrPins、Times(移动/读取策略)、Delay确定hashs、ids
  86. }*/
  87. }
  88. return ramsg.NewCoorMoveRespOK(
  89. redundancy,
  90. object.ECName,
  91. hashs,
  92. ids,
  93. object.FileSizeInBytes,
  94. )
  95. }
  96. func (service *CommandService) RepWrite(msg *ramsg.RepWriteCommand) ramsg.WriteResp {
  97. //查询用户可用的节点IP
  98. nodeIPs, err := service.db.QueryUserAvailableNodeIPs(msg.UserID)
  99. if err != nil {
  100. log.Warn("query user available node ip failed, err: %s", err.Error())
  101. return ramsg.NewCoorWriteRespFailed(errorcode.OPERATION_FAILED, "query user available node ip failed")
  102. }
  103. if len(nodeIPs) < msg.ReplicateNumber {
  104. log.Warn("user nodes are not enough, err: %s", err.Error())
  105. return ramsg.NewCoorWriteRespFailed(errorcode.OPERATION_FAILED, "user nodes are not enough")
  106. }
  107. numRep := msg.ReplicateNumber
  108. ips := make([]string, numRep)
  109. //随机选取numRep个nodeIp
  110. start := utils.GetRandInt(len(nodeIPs))
  111. for i := 0; i < numRep; i++ {
  112. ips[i] = nodeIPs[(start+i)%len(nodeIPs)]
  113. }
  114. _, err = service.db.CreateRepObject(msg.ObjectName, msg.BucketName, msg.FileSizeInBytes, msg.ReplicateNumber)
  115. if err != nil {
  116. log.Warn("create object failed, err: %s", err.Error())
  117. return ramsg.NewCoorWriteRespFailed(errorcode.OPERATION_FAILED, "create object failed")
  118. }
  119. return ramsg.NewCoorWriteRespOK(ips)
  120. }
  121. func (service *CommandService) WriteRepHash(msg *ramsg.WriteRepHashCommand) ramsg.WriteHashResp {
  122. //jh:根据command中的信息,插入对象副本表中的Hash字段,并完成缓存表的插入
  123. //插入对象副本表中的Hash字段
  124. //TODO xh: objectID的查询合并到Insert_RepHash函数中去
  125. ObjectId := Query_ObjectID(msg.ObjectName)
  126. Insert_RepHash(ObjectId, msg.Hashes[0])
  127. //缓存表的插入
  128. Insert_Cache(msg.Hashes, msg.IPs, false)
  129. //返回消息
  130. return ramsg.NewCoorWriteHashRespOK()
  131. }
  132. func (service *CommandService) ECWrite(msg *ramsg.ECWriteCommand) ramsg.WriteResp {
  133. //jh:根据command中的UserId查询用户节点权限表,返回用户可用的NodeIp
  134. //kx:根据command中的ecName,得到ecN,然后从jh查到的NodeIp中选择ecN个,赋值给Ips
  135. //jh:完成对象表、对象编码块表的插入(对象编码块表的Hash字段先不插入)
  136. //返回消息
  137. nodeip := Query_UserNode(msg.UserID) //nodeip格式:[]string
  138. ecid := msg.ECName
  139. ecPolicies := *utils.GetEcPolicy()
  140. ecPolicy := ecPolicies[ecid]
  141. ecN := ecPolicy.GetN()
  142. ips := make([]string, ecN)
  143. //kx:从jh查到的NodeIp中选择ecN个,赋值给Ips
  144. //根据BucketName查询BucketID
  145. start := utils.GetRandInt(len(nodeip))
  146. for i := 0; i < ecN; i++ {
  147. ips[i] = nodeip[(start+i)%len(nodeip)]
  148. }
  149. //根据BucketName查询BucketID
  150. BucketID := Query_BucketID(msg.BucketName)
  151. if BucketID == -1 {
  152. // TODO 日志
  153. return ramsg.NewCoorWriteRespFailed(errorcode.OPERATION_FAILED, fmt.Sprintf("bucket id not found for %s", msg.BucketName))
  154. }
  155. //对象表插入Insert_Cache
  156. ObjectID := Insert_EcObject(msg.ObjectName, BucketID, msg.FileSizeInBytes, msg.ECName)
  157. //对象编码块表插入,hash暂时为空
  158. for i := 0; i < ecN; i++ {
  159. Insert_EcObjectBlock(ObjectID, i)
  160. }
  161. return ramsg.NewCoorWriteRespOK(ips)
  162. }
  163. func (service *CommandService) WriteECHash(msg *ramsg.WriteECHashCommand) ramsg.WriteHashResp {
  164. //jh:根据command中的信息,插入对象编码块表中的Hash字段,并完成缓存表的插入
  165. //返回消息
  166. //插入对象编码块表中的Hash字段
  167. ObjectId := Query_ObjectID(msg.ObjectName)
  168. Insert_EcHash(ObjectId, msg.Hashes)
  169. //缓存表的插入
  170. Insert_Cache(msg.Hashes, msg.IPs, false)
  171. return ramsg.NewCoorWriteHashRespOK()
  172. }
  173. func (service *CommandService) Read(msg *ramsg.ReadCommand) ramsg.ReadResp {
  174. var ips, hashs []string
  175. blockIds := []int{0}
  176. //先查询
  177. BucketID := Query_BucketID(msg.BucketName)
  178. //jh:使用command中的bucketid和objectname查询对象表,获得objectid,redundancy,EcName,fileSizeInBytes
  179. /*
  180. TODO xh:
  181. redundancyy(bool型)这个变量名不规范(应该是为了与redundancy(字符型)分开而随意取的名),需调整:
  182. 只用redundancy变量,且将其类型调整为bool(用常量EC表示false,REP表示true),ReadRes结构体的定义做相应修改
  183. */
  184. ObjectID, fileSizeInBytes, redundancyy, ecName := Query_Object(msg.ObjectName, BucketID)
  185. //-若redundancy是rep,查询对象副本表, 获得repHash
  186. redundancy := "rep"
  187. if redundancyy { //rep
  188. repHash := Query_ObjectRep(ObjectID)
  189. hashs[0] = repHash
  190. caches := Query_Cache(repHash)
  191. //TODO xh: 所有错误消息均不可吃掉,记录到日志里
  192. for _, cache := range caches {
  193. ip := cache.NodeIP
  194. ips = append(ips, ip)
  195. }
  196. } else { //ec
  197. redundancy = "ec"
  198. blockHashs := Query_ObjectBlock(ObjectID)
  199. ecPolicies := *utils.GetEcPolicy()
  200. ecPolicy := ecPolicies[ecName]
  201. ecN := ecPolicy.GetN()
  202. ecK := ecPolicy.GetK()
  203. for i := 0; i < ecN; i++ {
  204. ips = append(ips, "-1")
  205. hashs = append(hashs, "-1")
  206. }
  207. for _, tt := range blockHashs {
  208. id := tt.InnerID
  209. hash := tt.BlockHash
  210. hashs[id] = hash //这里有问题,采取的其实是直接顺序读的方式,等待加入自适应读模块
  211. cache := Query_Cache(hash)
  212. ip := cache[0].NodeIP
  213. ips[id] = ip
  214. }
  215. //这里也有和上面一样的问题
  216. for i := 1; i < ecK; i++ {
  217. blockIds = append(blockIds, i)
  218. }
  219. }
  220. return ramsg.NewCoorReadRespOK(
  221. redundancy,
  222. ips,
  223. hashs,
  224. blockIds,
  225. ecName,
  226. fileSizeInBytes,
  227. )
  228. }
// TempCacheReport records that the given hashes are now cached on the
// reporting node by upserting rows in the cache table.
func (service *CommandService) TempCacheReport(msg *ramsg.TempCacheReport) {
// NOTE(review): any return value/error from this DB call is silently
// discarded — consider logging failures.
service.db.BatchInsertOrUpdateCache(msg.Hashes, msg.IP)
}
  232. func (service *CommandService) AgentStatusReport(msg *ramsg.AgentStatusReport) {
  233. //jh:根据command中的Ip,插入节点延迟表,和节点表的NodeStatus
  234. //根据command中的Ip,插入节点延迟表
  235. ips := utils.GetAgentIps()
  236. Insert_NodeDelay(msg.IP, ips, msg.AgentDelay)
  237. //从配置表里读取节点地域NodeLocation
  238. //插入节点表的NodeStatus
  239. Insert_Node(msg.IP, msg.IP, msg.IPFSStatus, msg.LocalDirStatus)
  240. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。