You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

attachment.go 21 kB

4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
  5. import (
  6. contexExt "context"
  7. "encoding/json"
  8. "errors"
  9. "fmt"
  10. "mime/multipart"
  11. "net/http"
  12. "strconv"
  13. "strings"
  14. "code.gitea.io/gitea/models"
  15. "code.gitea.io/gitea/modules/context"
  16. "code.gitea.io/gitea/modules/log"
  17. "code.gitea.io/gitea/modules/minio_ext"
  18. "code.gitea.io/gitea/modules/setting"
  19. "code.gitea.io/gitea/modules/storage"
  20. "code.gitea.io/gitea/modules/upload"
  21. "code.gitea.io/gitea/modules/worker"
  22. gouuid "github.com/satori/go.uuid"
  23. )
const (
	// Result codes reported back by the asynchronous decompress task
	// (see UpdateAttachmentDecompressState): "0" on success, "1" on failure.
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)
// CloudBrainDataset is the JSON shape returned to the cloud-brain service
// when listing datasets (built in queryDatasets).
type CloudBrainDataset struct {
	UUID       string `json:"id"`         // stringified attachment row ID, despite the field name
	Name       string `json:"name"`       // attachment file name
	Path       string `json:"place"`      // storage location assembled from the MinIO settings
	UserName   string `json:"provider"`   // uploader's user name
	CreateTime string `json:"created_at"` // creation time, formatted for display
}
// UploadForm is the multipart form submitted when uploading a single chunk
// of a chunked (multipart) upload.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
// RenderAttachmentSettings exposes the attachment configuration to the
// template context; exported wrapper around renderAttachmentSettings.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
  47. func renderAttachmentSettings(ctx *context.Context) {
  48. ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
  49. ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
  50. ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
  51. ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
  52. ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
  53. }
  54. // UploadAttachment response for uploading issue's attachment
  55. func UploadAttachment(ctx *context.Context) {
  56. if !setting.Attachment.Enabled {
  57. ctx.Error(404, "attachment is not enabled")
  58. return
  59. }
  60. file, header, err := ctx.Req.FormFile("file")
  61. if err != nil {
  62. ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
  63. return
  64. }
  65. defer file.Close()
  66. buf := make([]byte, 1024)
  67. n, _ := file.Read(buf)
  68. if n > 0 {
  69. buf = buf[:n]
  70. }
  71. err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
  72. if err != nil {
  73. ctx.Error(400, err.Error())
  74. return
  75. }
  76. datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
  77. attach, err := models.NewAttachment(&models.Attachment{
  78. IsPrivate: true,
  79. UploaderID: ctx.User.ID,
  80. Name: header.Filename,
  81. DatasetID: datasetID,
  82. }, buf, file)
  83. if err != nil {
  84. ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
  85. return
  86. }
  87. log.Trace("New attachment uploaded: %s", attach.UUID)
  88. ctx.JSON(200, map[string]string{
  89. "uuid": attach.UUID,
  90. })
  91. }
  92. func UpdatePublicAttachment(ctx *context.Context) {
  93. file := ctx.Query("file")
  94. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  95. attach, err := models.GetAttachmentByUUID(file)
  96. if err != nil {
  97. ctx.Error(404, err.Error())
  98. return
  99. }
  100. attach.IsPrivate = isPrivate
  101. models.UpdateAttachment(attach)
  102. }
  103. // DeleteAttachment response for deleting issue's attachment
  104. func DeleteAttachment(ctx *context.Context) {
  105. file := ctx.Query("file")
  106. attach, err := models.GetAttachmentByUUID(file)
  107. if err != nil {
  108. ctx.Error(400, err.Error())
  109. return
  110. }
  111. if !ctx.IsSigned || (ctx.User.ID != attach.UploaderID) {
  112. ctx.Error(403)
  113. return
  114. }
  115. err = models.DeleteAttachment(attach, false)
  116. if err != nil {
  117. ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
  118. return
  119. }
  120. ctx.JSON(200, map[string]string{
  121. "uuid": attach.UUID,
  122. })
  123. }
  124. // GetAttachment serve attachements
  125. func GetAttachment(ctx *context.Context) {
  126. attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
  127. if err != nil {
  128. if models.IsErrAttachmentNotExist(err) {
  129. ctx.Error(404)
  130. } else {
  131. ctx.ServerError("GetAttachmentByUUID", err)
  132. }
  133. return
  134. }
  135. repository, unitType, err := attach.LinkedRepository()
  136. if err != nil {
  137. ctx.ServerError("LinkedRepository", err)
  138. return
  139. }
  140. if repository == nil { //If not linked
  141. if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
  142. ctx.Error(http.StatusNotFound)
  143. return
  144. }
  145. } else { //If we have the repository we check access
  146. perm, err := models.GetUserRepoPermission(repository, ctx.User)
  147. if err != nil {
  148. ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err.Error())
  149. return
  150. }
  151. if !perm.CanRead(unitType) {
  152. ctx.Error(http.StatusNotFound)
  153. return
  154. }
  155. }
  156. dataSet, err := attach.LinkedDataSet()
  157. if err != nil {
  158. ctx.ServerError("LinkedDataSet", err)
  159. return
  160. }
  161. if dataSet != nil {
  162. isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
  163. if err != nil {
  164. ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
  165. return
  166. }
  167. if !isPermit {
  168. ctx.Error(http.StatusNotFound)
  169. return
  170. }
  171. }
  172. //If we have matched and access to release or issue
  173. if setting.Attachment.StoreType == storage.MinioStorageType {
  174. url, err := storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name)
  175. if err != nil {
  176. ctx.ServerError("PresignedGetURL", err)
  177. return
  178. }
  179. if err = increaseDownloadCount(attach, dataSet); err != nil {
  180. ctx.ServerError("Update", err)
  181. return
  182. }
  183. http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
  184. } else {
  185. fr, err := storage.Attachments.Open(attach.RelativePath())
  186. if err != nil {
  187. ctx.ServerError("Open", err)
  188. return
  189. }
  190. defer fr.Close()
  191. if err = increaseDownloadCount(attach, dataSet); err != nil {
  192. ctx.ServerError("Update", err)
  193. return
  194. }
  195. if err = ServeData(ctx, attach.Name, fr); err != nil {
  196. ctx.ServerError("ServeData", err)
  197. return
  198. }
  199. }
  200. }
  201. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  202. if err := attach.IncreaseDownloadCount(); err != nil {
  203. return err
  204. }
  205. if dataSet != nil {
  206. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  207. return err
  208. }
  209. }
  210. return nil
  211. }
  212. // Get a presigned url for put object
  213. func GetPresignedPutObjectURL(ctx *context.Context) {
  214. if !setting.Attachment.Enabled {
  215. ctx.Error(404, "attachment is not enabled")
  216. return
  217. }
  218. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  219. if err != nil {
  220. ctx.Error(400, err.Error())
  221. return
  222. }
  223. if setting.Attachment.StoreType == storage.MinioStorageType {
  224. uuid := gouuid.NewV4().String()
  225. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  226. if err != nil {
  227. ctx.ServerError("PresignedPutURL", err)
  228. return
  229. }
  230. ctx.JSON(200, map[string]string{
  231. "uuid": uuid,
  232. "url": url,
  233. })
  234. } else {
  235. ctx.Error(404, "storage type is not enabled")
  236. return
  237. }
  238. }
  239. // AddAttachment response for add attachment record
  240. func AddAttachment(ctx *context.Context) {
  241. uuid := ctx.Query("uuid")
  242. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
  243. if err != nil {
  244. ctx.ServerError("HasObject", err)
  245. return
  246. }
  247. if !has {
  248. ctx.Error(404, "attachment has not been uploaded")
  249. return
  250. }
  251. attachment, err := models.InsertAttachment(&models.Attachment{
  252. UUID: uuid,
  253. UploaderID: ctx.User.ID,
  254. IsPrivate: true,
  255. Name: ctx.Query("file_name"),
  256. Size: ctx.QueryInt64("size"),
  257. DatasetID: ctx.QueryInt64("dataset_id"),
  258. })
  259. if err != nil {
  260. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  261. return
  262. }
  263. if attachment.DatasetID != 0 {
  264. if strings.HasSuffix(attachment.Name, ".zip") {
  265. err = worker.SendDecompressTask(contexExt.Background(), uuid)
  266. if err != nil {
  267. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  268. } else {
  269. attachment.DecompressState = models.DecompressStateIng
  270. err = models.UpdateAttachment(attachment)
  271. if err != nil {
  272. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  273. }
  274. }
  275. }
  276. }
  277. ctx.JSON(200, map[string]string{
  278. "result_code": "0",
  279. })
  280. }
  281. func UpdateAttachmentDecompressState(ctx *context.Context) {
  282. uuid := ctx.Query("uuid")
  283. result := ctx.Query("result")
  284. attach, err := models.GetAttachmentByUUID(uuid)
  285. if err != nil {
  286. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  287. return
  288. }
  289. if result == DecompressSuccess {
  290. attach.DecompressState = models.DecompressStateDone
  291. } else if result == DecompressFailed {
  292. attach.DecompressState = models.DecompressStateFailed
  293. } else {
  294. log.Error("result is error:", result)
  295. return
  296. }
  297. err = models.UpdateAttachment(attach)
  298. if err != nil {
  299. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  300. return
  301. }
  302. ctx.JSON(200, map[string]string{
  303. "result_code": "0",
  304. })
  305. }
// GetSuccessChunks reports the resumable-upload state for a file identified
// by its MD5 and the current user: which chunks are already uploaded, the
// upload ID to resume with, and (if an attachment record exists) its dataset
// metadata. It also reconciles the DB "uploaded" flag with what is actually
// present in storage.
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// No record at all: tell the client to start a brand-new upload.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	// Check whether the object really exists on the backend that matches the
	// cloud-brain type (MinIO for type one, OBS otherwise).
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		// Storage has the object but the DB says otherwise: repair the flag.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		// DB says uploaded but storage disagrees: repair the flag the other way.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
		// Ask the backend which parts have already been received so the
		// client can resume from there.
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				ctx.ServerError("GetPartInfos failed", err)
				return
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				ctx.ServerError("GetObsPartInfos failed", err)
				return
			}
		}
	}

	// Look up the attachment record; "not exist" is a valid state (upload in
	// progress, record not created yet) and is reported with zeroed fields.
	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart starts a new chunked upload: it validates the file type and
// size, creates a multipart upload on the backend matching the cloud-brain
// type (MinIO for type one, OBS otherwise), records a FileChunk bookkeeping
// row, and returns the generated uuid plus the backend uploadID.
//
// NOTE(review): the whole flow is gated on StoreType == MinioStorageType,
// including the OBS branch — confirm that OBS deployments also run with the
// minio store type configured.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		// Enforce backend limits on part count and total object size.
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		// Persist the upload session so GetSuccessChunks/UpdateMultipart can
		// resume and track it later.
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  478. func GetMultipartUploadUrl(ctx *context.Context) {
  479. uuid := ctx.Query("uuid")
  480. uploadID := ctx.Query("uploadID")
  481. partNumber := ctx.QueryInt("chunkNumber")
  482. size := ctx.QueryInt64("size")
  483. typeCloudBrain := ctx.QueryInt("type")
  484. err := checkTypeCloudBrain(typeCloudBrain)
  485. if err != nil {
  486. ctx.ServerError("checkTypeCloudBrain failed", err)
  487. return
  488. }
  489. url := ""
  490. if typeCloudBrain == models.TypeCloudBrainOne {
  491. if size > minio_ext.MinPartSize {
  492. ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
  493. return
  494. }
  495. url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
  496. if err != nil {
  497. ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
  498. return
  499. }
  500. } else {
  501. url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
  502. if err != nil {
  503. ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
  504. return
  505. }
  506. }
  507. ctx.JSON(200, map[string]string{
  508. "url": url,
  509. })
  510. }
// UploadPart receives one chunk of a multipart upload via a multipart form
// and forwards it to the OBS backend, returning the resulting etag.
func UploadPart(ctx *context.Context) {
	// NOTE(review): the whole request body is read here just for logging,
	// before ParseMultipartForm runs. Depending on how the framework buffers
	// the body this may consume the stream the form parser needs — confirm.
	// The error from Body().String() is also discarded by the reassignment of
	// err on the next call.
	tmp, err := ctx.Req.Body().String()
	log.Info(tmp)

	err = ctx.Req.ParseMultipartForm(100 * 1024 * 1024)
	if err != nil {
		ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err))
		return
	}

	file, fileHeader, err := ctx.Req.FormFile("file")
	log.Info(ctx.Req.Form.Get("file"))
	if err != nil {
		ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err))
		return
	}

	log.Info(fileHeader.Filename)

	// NOTE(review): ObsUploadPart is called with hard-coded empty key and
	// uploadID and a fixed part number/size of 1 — this looks like leftover
	// debug or stub code; verify against the intended OBS upload flow before
	// relying on this endpoint.
	etag, err := storage.ObsUploadPart("", "", 1, 1, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"etag": etag,
	})
}
// CompleteMultipart finalizes a chunked upload: it completes the multipart
// upload on the matching backend (MinIO for type one, OBS otherwise), marks
// the FileChunk row uploaded, creates the attachment record, and — for zip
// files attached to a dataset — schedules an asynchronous decompress task.
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	// The object is now fully assembled on the backend; record that.
	fileChunk.IsUploaded = models.FileUploaded
	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       ctx.Query("file_name"),
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	// Dataset zip archives are unpacked asynchronously; failures are logged
	// only (best effort) and do not fail the request.
	if attachment.DatasetID != 0 {
		if strings.HasSuffix(attachment.Name, ".zip") {
			err = worker.SendDecompressTask(contexExt.Background(), uuid)
			if err != nil {
				log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
			} else {
				attachment.DecompressState = models.DecompressStateIng
				err = models.UpdateAttachment(attachment)
				if err != nil {
					log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
				}
			}
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  603. func UpdateMultipart(ctx *context.Context) {
  604. uuid := ctx.Query("uuid")
  605. partNumber := ctx.QueryInt("chunkNumber")
  606. etag := ctx.Query("etag")
  607. fileChunk, err := models.GetFileChunkByUUID(uuid)
  608. if err != nil {
  609. if models.IsErrFileChunkNotExist(err) {
  610. ctx.Error(404)
  611. } else {
  612. ctx.ServerError("GetFileChunkByUUID", err)
  613. }
  614. return
  615. }
  616. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  617. err = models.UpdateFileChunk(fileChunk)
  618. if err != nil {
  619. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  620. return
  621. }
  622. ctx.JSON(200, map[string]string{
  623. "result_code": "0",
  624. })
  625. }
  626. func HandleUnDecompressAttachment() {
  627. attachs, err := models.GetUnDecompressAttachments()
  628. if err != nil {
  629. log.Error("GetUnDecompressAttachments failed:", err.Error())
  630. return
  631. }
  632. for _, attach := range attachs {
  633. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID)
  634. if err != nil {
  635. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  636. } else {
  637. attach.DecompressState = models.DecompressStateIng
  638. err = models.UpdateAttachment(attach)
  639. if err != nil {
  640. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  641. }
  642. }
  643. }
  644. return
  645. }
  646. func QueryAllPublicDataset(ctx *context.Context) {
  647. attachs, err := models.GetAllPublicAttachments()
  648. if err != nil {
  649. ctx.JSON(200, map[string]string{
  650. "result_code": "-1",
  651. "error_msg": err.Error(),
  652. "data": "",
  653. })
  654. return
  655. }
  656. queryDatasets(ctx, attachs)
  657. }
  658. func QueryPrivateDataset(ctx *context.Context) {
  659. username := ctx.Params(":username")
  660. attachs, err := models.GetPrivateAttachments(username)
  661. if err != nil {
  662. ctx.JSON(200, map[string]string{
  663. "result_code": "-1",
  664. "error_msg": err.Error(),
  665. "data": "",
  666. })
  667. return
  668. }
  669. for _, attach := range attachs {
  670. attach.Name = username
  671. }
  672. queryDatasets(ctx, attachs)
  673. }
  674. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  675. var datasets []CloudBrainDataset
  676. if len(attachs) == 0 {
  677. log.Info("dataset is null")
  678. ctx.JSON(200, map[string]string{
  679. "result_code": "0",
  680. "error_msg": "",
  681. "data": "",
  682. })
  683. return
  684. }
  685. for _, attch := range attachs {
  686. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  687. if err != nil || !has {
  688. continue
  689. }
  690. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  691. attch.Attachment.Name,
  692. setting.Attachment.Minio.RealPath +
  693. setting.Attachment.Minio.Bucket + "/" +
  694. setting.Attachment.Minio.BasePath +
  695. models.AttachmentRelativePath(attch.UUID) +
  696. attch.UUID,
  697. attch.Name,
  698. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  699. }
  700. data, err := json.Marshal(datasets)
  701. if err != nil {
  702. log.Error("json.Marshal failed:", err.Error())
  703. ctx.JSON(200, map[string]string{
  704. "result_code": "-1",
  705. "error_msg": err.Error(),
  706. "data": "",
  707. })
  708. return
  709. }
  710. ctx.JSON(200, map[string]string{
  711. "result_code": "0",
  712. "error_msg": "",
  713. "data": string(data),
  714. })
  715. return
  716. }
  717. func checkTypeCloudBrain(typeCloudBrain int) error {
  718. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  719. log.Error("type error:", typeCloudBrain)
  720. return errors.New("type error")
  721. }
  722. return nil
  723. }