You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

attachment_model.go 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428
  1. package repo
  2. import (
  3. "fmt"
  4. "path"
  5. "strconv"
  6. "strings"
  7. "code.gitea.io/gitea/models"
  8. "code.gitea.io/gitea/modules/context"
  9. "code.gitea.io/gitea/modules/log"
  10. "code.gitea.io/gitea/modules/minio_ext"
  11. "code.gitea.io/gitea/modules/setting"
  12. "code.gitea.io/gitea/modules/storage"
  13. "code.gitea.io/gitea/modules/upload"
  14. gouuid "github.com/satori/go.uuid"
  15. )
  16. func GetModelChunks(ctx *context.Context) {
  17. fileMD5 := ctx.Query("md5")
  18. typeCloudBrain := ctx.QueryInt("type")
  19. fileName := ctx.Query("file_name")
  20. scene := ctx.Query("scene")
  21. modeluuid := ctx.Query("modeluuid")
  22. log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
  23. var chunks string
  24. err := checkTypeCloudBrain(typeCloudBrain)
  25. if err != nil {
  26. ctx.ServerError("checkTypeCloudBrain failed", err)
  27. return
  28. }
  29. fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
  30. if err != nil {
  31. if models.IsErrFileChunkNotExist(err) {
  32. ctx.JSON(200, map[string]string{
  33. "uuid": "",
  34. "uploaded": "0",
  35. "uploadID": "",
  36. "chunks": "",
  37. })
  38. } else {
  39. ctx.ServerError("GetFileChunkByMD5", err)
  40. }
  41. return
  42. }
  43. isExist := false
  44. if typeCloudBrain == models.TypeCloudBrainOne {
  45. isExist, err = storage.Attachments.HasObject(fileChunk.ObjectName)
  46. if isExist {
  47. log.Info("The file is exist in minio. has uploaded.path=" + fileChunk.ObjectName)
  48. } else {
  49. log.Info("The file is not exist in minio..")
  50. }
  51. if err != nil {
  52. ctx.ServerError("HasObject failed", err)
  53. return
  54. }
  55. } else {
  56. isExist, err = storage.ObsHasObject(fileChunk.ObjectName)
  57. if isExist {
  58. log.Info("The file is exist in obs. has uploaded. path=" + fileChunk.ObjectName)
  59. } else {
  60. log.Info("The file is not exist in obs.")
  61. }
  62. if err != nil {
  63. ctx.ServerError("ObsHasObject failed", err)
  64. return
  65. }
  66. }
  67. if isExist {
  68. if fileChunk.IsUploaded == models.FileNotUploaded {
  69. log.Info("the file has been uploaded but not recorded")
  70. fileChunk.IsUploaded = models.FileUploaded
  71. if err = models.UpdateModelFileChunk(fileChunk); err != nil {
  72. log.Error("UpdateFileChunk failed:", err.Error())
  73. }
  74. }
  75. } else {
  76. if fileChunk.IsUploaded == models.FileUploaded {
  77. log.Info("the file has been recorded but not uploaded")
  78. fileChunk.IsUploaded = models.FileNotUploaded
  79. if err = models.UpdateModelFileChunk(fileChunk); err != nil {
  80. log.Error("UpdateFileChunk failed:", err.Error())
  81. }
  82. }
  83. if typeCloudBrain == models.TypeCloudBrainOne {
  84. chunks, err = storage.GetPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
  85. if err != nil {
  86. log.Error("GetPartInfos failed:%v", err.Error())
  87. }
  88. } else {
  89. chunks, err = storage.GetObsPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
  90. if err != nil {
  91. log.Error("GetObsPartInfos failed:%v", err.Error())
  92. }
  93. }
  94. if err != nil {
  95. models.DeleteModelFileChunk(fileChunk)
  96. ctx.JSON(200, map[string]string{
  97. "uuid": "",
  98. "uploaded": "0",
  99. "uploadID": "",
  100. "chunks": "",
  101. })
  102. return
  103. }
  104. }
  105. var attachID int64
  106. attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
  107. if err != nil {
  108. if models.IsErrAttachmentNotExist(err) {
  109. attachID = 0
  110. } else {
  111. ctx.ServerError("GetAttachmentByUUID", err)
  112. return
  113. }
  114. } else {
  115. attachID = attach.ID
  116. }
  117. if attach == nil {
  118. ctx.JSON(200, map[string]string{
  119. "uuid": fileChunk.UUID,
  120. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  121. "uploadID": fileChunk.UploadID,
  122. "chunks": string(chunks),
  123. "attachID": "0",
  124. "datasetID": "0",
  125. "fileName": "",
  126. "datasetName": "",
  127. })
  128. return
  129. }
  130. //使用description存储模型信息
  131. dbmodeluuid := attach.Description
  132. modelname := ""
  133. if dbmodeluuid != modeluuid {
  134. log.Info("The file has uploaded.fileChunk.ObjectName=" + fileChunk.ObjectName + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
  135. isExist := copyModelAttachmentFile(typeCloudBrain, fileChunk, fileName, modeluuid)
  136. if dbmodeluuid != "" {
  137. model, err := models.QueryModelById(dbmodeluuid)
  138. if err == nil && model != nil {
  139. modelname = model.Name
  140. }
  141. }
  142. if isExist {
  143. ctx.JSON(200, map[string]string{
  144. "uuid": fileChunk.UUID,
  145. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  146. "uploadID": fileChunk.UploadID,
  147. "chunks": string(chunks),
  148. "attachID": strconv.Itoa(int(attachID)),
  149. "modeluuid": modeluuid,
  150. "fileName": attach.Name,
  151. "modelName": modelname,
  152. })
  153. } else {
  154. UpdateModelSize(modeluuid)
  155. ctx.JSON(200, map[string]string{
  156. "uuid": fileChunk.UUID,
  157. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  158. "uploadID": fileChunk.UploadID,
  159. "chunks": string(chunks),
  160. "attachID": strconv.Itoa(int(attachID)),
  161. "fileName": attach.Name,
  162. })
  163. }
  164. return
  165. } else {
  166. model, err := models.QueryModelById(dbmodeluuid)
  167. if err == nil {
  168. modelname = model.Name
  169. }
  170. ctx.JSON(200, map[string]string{
  171. "uuid": fileChunk.UUID,
  172. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  173. "uploadID": fileChunk.UploadID,
  174. "chunks": string(chunks),
  175. "attachID": strconv.Itoa(int(attachID)),
  176. "modeluuid": dbmodeluuid,
  177. "fileName": attach.Name,
  178. "modelName": modelname,
  179. })
  180. return
  181. }
  182. }
  183. func copyModelAttachmentFile(typeCloudBrain int, fileChunk *models.ModelFileChunk, fileName, modeluuid string) bool {
  184. srcObjectName := fileChunk.ObjectName
  185. var isExist bool
  186. //copy
  187. destObjectName := getObjectName(fileName, modeluuid)
  188. if typeCloudBrain == models.TypeCloudBrainOne {
  189. bucketName := setting.Attachment.Minio.Bucket
  190. log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
  191. if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
  192. isExist = true
  193. } else {
  194. log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
  195. storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
  196. }
  197. } else {
  198. bucketName := setting.Bucket
  199. log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
  200. if storage.ObsGetFilesSize(bucketName, []string{destObjectName}) > 0 {
  201. isExist = true
  202. } else {
  203. log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
  204. storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
  205. }
  206. }
  207. return isExist
  208. }
  209. func getObjectName(filename string, modeluuid string) string {
  210. return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
  211. }
  212. func NewModelMultipart(ctx *context.Context) {
  213. if !setting.Attachment.Enabled {
  214. ctx.Error(404, "attachment is not enabled")
  215. return
  216. }
  217. fileName := ctx.Query("file_name")
  218. modeluuid := ctx.Query("modeluuid")
  219. err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
  220. if err != nil {
  221. ctx.Error(400, err.Error())
  222. return
  223. }
  224. typeCloudBrain := ctx.QueryInt("type")
  225. err = checkTypeCloudBrain(typeCloudBrain)
  226. if err != nil {
  227. ctx.ServerError("checkTypeCloudBrain failed", err)
  228. return
  229. }
  230. if setting.Attachment.StoreType == storage.MinioStorageType {
  231. totalChunkCounts := ctx.QueryInt("totalChunkCounts")
  232. if totalChunkCounts > minio_ext.MaxPartsCount {
  233. ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
  234. return
  235. }
  236. fileSize := ctx.QueryInt64("size")
  237. if fileSize > minio_ext.MaxMultipartPutObjectSize {
  238. ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
  239. return
  240. }
  241. uuid := gouuid.NewV4().String()
  242. var uploadID string
  243. var objectName string
  244. if typeCloudBrain == models.TypeCloudBrainOne {
  245. objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
  246. uploadID, err = storage.NewMultiPartUpload(objectName)
  247. if err != nil {
  248. ctx.ServerError("NewMultipart", err)
  249. return
  250. }
  251. } else {
  252. objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
  253. uploadID, err = storage.NewObsMultiPartUpload(objectName)
  254. if err != nil {
  255. ctx.ServerError("NewObsMultiPartUpload", err)
  256. return
  257. }
  258. }
  259. _, err = models.InsertModelFileChunk(&models.ModelFileChunk{
  260. UUID: uuid,
  261. UserID: ctx.User.ID,
  262. UploadID: uploadID,
  263. Md5: ctx.Query("md5"),
  264. Size: fileSize,
  265. ObjectName: objectName,
  266. TotalChunks: totalChunkCounts,
  267. Type: typeCloudBrain,
  268. })
  269. if err != nil {
  270. ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
  271. return
  272. }
  273. ctx.JSON(200, map[string]string{
  274. "uuid": uuid,
  275. "uploadID": uploadID,
  276. })
  277. } else {
  278. ctx.Error(404, "storage type is not enabled")
  279. return
  280. }
  281. }
  282. func GetModelMultipartUploadUrl(ctx *context.Context) {
  283. uuid := ctx.Query("uuid")
  284. uploadID := ctx.Query("uploadID")
  285. partNumber := ctx.QueryInt("chunkNumber")
  286. size := ctx.QueryInt64("size")
  287. fileName := ctx.Query("file_name")
  288. typeCloudBrain := ctx.QueryInt("type")
  289. err := checkTypeCloudBrain(typeCloudBrain)
  290. if err != nil {
  291. ctx.ServerError("checkTypeCloudBrain failed", err)
  292. return
  293. }
  294. fileChunk, err := models.GetModelFileChunkByUUID(uuid)
  295. if err != nil {
  296. if models.IsErrFileChunkNotExist(err) {
  297. ctx.Error(404)
  298. } else {
  299. ctx.ServerError("GetFileChunkByUUID", err)
  300. }
  301. return
  302. }
  303. url := ""
  304. if typeCloudBrain == models.TypeCloudBrainOne {
  305. if size > minio_ext.MinPartSize {
  306. ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
  307. return
  308. }
  309. url, err = storage.GenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber, size)
  310. if err != nil {
  311. ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
  312. return
  313. }
  314. } else {
  315. if setting.PROXYURL != "" {
  316. url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
  317. log.Info("return url=" + url)
  318. } else {
  319. url, err = storage.ObsGenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber)
  320. if err != nil {
  321. ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
  322. return
  323. }
  324. log.Info("url=" + url)
  325. }
  326. }
  327. ctx.JSON(200, map[string]string{
  328. "url": url,
  329. })
  330. }
  331. func CompleteModelMultipart(ctx *context.Context) {
  332. uuid := ctx.Query("uuid")
  333. uploadID := ctx.Query("uploadID")
  334. typeCloudBrain := ctx.QueryInt("type")
  335. fileName := ctx.Query("file_name")
  336. modeluuid := ctx.Query("modeluuid")
  337. log.Warn("uuid:" + uuid)
  338. log.Warn("modeluuid:" + modeluuid)
  339. log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))
  340. err := checkTypeCloudBrain(typeCloudBrain)
  341. if err != nil {
  342. ctx.ServerError("checkTypeCloudBrain failed", err)
  343. return
  344. }
  345. fileChunk, err := models.GetModelFileChunkByUUID(uuid)
  346. if err != nil {
  347. if models.IsErrFileChunkNotExist(err) {
  348. ctx.Error(404)
  349. } else {
  350. ctx.ServerError("GetFileChunkByUUID", err)
  351. }
  352. return
  353. }
  354. if typeCloudBrain == models.TypeCloudBrainOne {
  355. _, err = storage.CompleteMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
  356. if err != nil {
  357. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  358. return
  359. }
  360. } else {
  361. err = storage.CompleteObsMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
  362. if err != nil {
  363. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  364. return
  365. }
  366. }
  367. fileChunk.IsUploaded = models.FileUploaded
  368. err = models.UpdateModelFileChunk(fileChunk)
  369. if err != nil {
  370. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  371. return
  372. }
  373. //更新模型大小信息
  374. UpdateModelSize(modeluuid)
  375. _, err = models.InsertAttachment(&models.Attachment{
  376. UUID: uuid,
  377. UploaderID: ctx.User.ID,
  378. IsPrivate: true,
  379. Name: fileName,
  380. Size: ctx.QueryInt64("size"),
  381. DatasetID: 0,
  382. Description: modeluuid,
  383. Type: typeCloudBrain,
  384. })
  385. if err != nil {
  386. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  387. return
  388. }
  389. ctx.JSON(200, map[string]string{
  390. "result_code": "0",
  391. })
  392. }