You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

attachment.go 24 kB

4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
  5. import (
  6. contexExt "context"
  7. "encoding/json"
  8. "errors"
  9. "fmt"
  10. "mime/multipart"
  11. "net/http"
  12. "path"
  13. "strconv"
  14. "strings"
  15. "code.gitea.io/gitea/models"
  16. "code.gitea.io/gitea/modules/context"
  17. "code.gitea.io/gitea/modules/labelmsg"
  18. "code.gitea.io/gitea/modules/log"
  19. "code.gitea.io/gitea/modules/minio_ext"
  20. "code.gitea.io/gitea/modules/setting"
  21. "code.gitea.io/gitea/modules/storage"
  22. "code.gitea.io/gitea/modules/upload"
  23. "code.gitea.io/gitea/modules/worker"
  24. gouuid "github.com/satori/go.uuid"
  25. )
// Decompress result codes reported back by the decompress worker
// (see UpdateAttachmentDecompressState).
const (
	// DecompressSuccess indicates the attachment was decompressed successfully.
	DecompressSuccess = "0"
	// DecompressFailed indicates decompression of the attachment failed.
	DecompressFailed = "1"
)
// CloudBrainDataset is the JSON payload describing one dataset entry
// returned to the CloudBrain service (see queryDatasets).
type CloudBrainDataset struct {
	UUID       string `json:"id"`         // attachment row ID rendered as a string
	Name       string `json:"name"`       // attachment file name
	Path       string `json:"place"`      // minio storage path of the attachment object
	UserName   string `json:"provider"`   // uploader's user name
	CreateTime string `json:"created_at"` // creation time, pre-formatted string
}
// UploadForm is the form submitted by the web client for one chunk of a
// multipart (resumable) attachment upload.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`    // multipart upload session ID
	UuID       string         `form:"uuid"`        // attachment UUID
	PartSize   int64          `form:"size"`        // size of this part in bytes
	Offset     int64          `form:"offset"`      // byte offset of this part in the file
	PartNumber int            `form:"chunkNumber"` // 1-based part index
	PartFile   multipart.File `form:"file"`        // content of this part
}
// RenderAttachmentSettings exposes the attachment settings to the template
// context; exported wrapper around renderAttachmentSettings.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
// renderAttachmentSettings copies the attachment-related configuration into
// ctx.Data so templates can render upload limits and allowed file types.
func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
// UploadAttachment handles a direct (non-chunked) attachment upload from a
// multipart form: it validates the content type against the configured
// allow-list, stores the file, and responds with the new attachment UUID.
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()
	// Sniff up to the first 1 KiB of the file to validate its content type.
	// NOTE(review): the Read error is deliberately discarded; a read failure
	// other than EOF leaves buf at its full zeroed length — confirm intended.
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	// NewAttachment receives both the sniffed prefix and the (partially read)
	// file handle; attachments start out private to the uploader.
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}
	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  94. func UpdatePublicAttachment(ctx *context.Context) {
  95. file := ctx.Query("file")
  96. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  97. attach, err := models.GetAttachmentByUUID(file)
  98. if err != nil {
  99. ctx.Error(404, err.Error())
  100. return
  101. }
  102. attach.IsPrivate = isPrivate
  103. models.UpdateAttachment(attach)
  104. }
// DeleteAttachment deletes the attachment identified by the "file" (UUID)
// query parameter, notifies the label system, removes any unzipped files,
// and deletes the associated chunk-upload bookkeeping record.
// Only the original uploader may delete an attachment.
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// Permission: the requester must be signed in and be the uploader.
	if !ctx.IsSigned || (ctx.User.ID != attach.UploaderID) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	// Best-effort notification to the label system; the Marshal error is ignored.
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	// Remove any files produced by decompressing this attachment.
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  134. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  135. dataset, err := models.GetDatasetByID(attach.DatasetID)
  136. if err != nil {
  137. log.Info("query dataset error")
  138. } else {
  139. repo, err := models.GetRepositoryByID(dataset.RepoID)
  140. if err != nil {
  141. log.Info("query repo error.")
  142. } else {
  143. repo.GetOwner()
  144. if ctx.User != nil {
  145. if repo.Owner.IsOrganization() {
  146. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  147. log.Info("org user may visit the attach.")
  148. return true
  149. }
  150. }
  151. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  152. if isCollaborator {
  153. log.Info("Collaborator user may visit the attach.")
  154. return true
  155. }
  156. }
  157. }
  158. }
  159. return false
  160. }
// GetAttachment serves the attachment identified by the ":uuid" URL
// parameter. It enforces repository/dataset read permissions, then either
// redirects to a presigned storage URL (minio for CloudBrain type one, OBS
// otherwise) or streams the file directly from local storage.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}
	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	// An attachment linked only to a dataset inherits that dataset's repository.
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}
	if repository == nil { //If not linked
		// Block private attachments unless the requester is the uploader or
		// has org/collaborator access via the owning dataset's repository.
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
			if err != nil {
				ctx.ServerError("ObsGetPreSignedUrl", err)
				return
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		// NOTE(review): 301 Moved Permanently is cacheable by browsers, but
		// presigned URLs expire; a temporary redirect (302/307) may be safer
		// — confirm before changing.
		http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  259. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  260. if err := attach.IncreaseDownloadCount(); err != nil {
  261. return err
  262. }
  263. if dataSet != nil {
  264. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  265. return err
  266. }
  267. }
  268. return nil
  269. }
  270. // Get a presigned url for put object
  271. func GetPresignedPutObjectURL(ctx *context.Context) {
  272. if !setting.Attachment.Enabled {
  273. ctx.Error(404, "attachment is not enabled")
  274. return
  275. }
  276. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  277. if err != nil {
  278. ctx.Error(400, err.Error())
  279. return
  280. }
  281. if setting.Attachment.StoreType == storage.MinioStorageType {
  282. uuid := gouuid.NewV4().String()
  283. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  284. if err != nil {
  285. ctx.ServerError("PresignedPutURL", err)
  286. return
  287. }
  288. ctx.JSON(200, map[string]string{
  289. "uuid": uuid,
  290. "url": url,
  291. })
  292. } else {
  293. ctx.Error(404, "storage type is not enabled")
  294. return
  295. }
  296. }
// AddAttachment records an attachment row for an object that the client has
// already uploaded directly to storage (minio or OBS), after verifying the
// object actually exists. For decompressible archives linked to a dataset it
// queues a decompress task (CloudBrain type one only).
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	uuid := ctx.Query("uuid")
	has := false
	// Verify the uploaded object exists in the backend matching the type.
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       ctx.Query("file_name"),
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				// Best-effort: a failed task submission is logged but does
				// not fail the request.
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  358. func isCanDecompress(name string) bool {
  359. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  360. return true
  361. }
  362. return false
  363. }
  364. func UpdateAttachmentDecompressState(ctx *context.Context) {
  365. uuid := ctx.Query("uuid")
  366. result := ctx.Query("result")
  367. attach, err := models.GetAttachmentByUUID(uuid)
  368. if err != nil {
  369. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  370. return
  371. }
  372. if result == DecompressSuccess {
  373. attach.DecompressState = models.DecompressStateDone
  374. } else if result == DecompressFailed {
  375. attach.DecompressState = models.DecompressStateFailed
  376. } else {
  377. log.Error("result is error:", result)
  378. return
  379. }
  380. err = models.UpdateAttachment(attach)
  381. if err != nil {
  382. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  383. return
  384. }
  385. log.Info("start to send msg to labelsystem ")
  386. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  387. var labelMap map[string]string
  388. labelMap = make(map[string]string)
  389. labelMap["UUID"] = uuid
  390. labelMap["Type"] = fmt.Sprint(attach.Type)
  391. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  392. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  393. labelMap["AttachName"] = attach.Name
  394. attachjson, _ := json.Marshal(labelMap)
  395. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  396. log.Info("end to send msg to labelsystem ")
  397. ctx.JSON(200, map[string]string{
  398. "result_code": "0",
  399. })
  400. }
  401. func GetSuccessChunks(ctx *context.Context) {
  402. fileMD5 := ctx.Query("md5")
  403. typeCloudBrain := ctx.QueryInt("type")
  404. var chunks string
  405. err := checkTypeCloudBrain(typeCloudBrain)
  406. if err != nil {
  407. ctx.ServerError("checkTypeCloudBrain failed", err)
  408. return
  409. }
  410. fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
  411. if err != nil {
  412. if models.IsErrFileChunkNotExist(err) {
  413. ctx.JSON(200, map[string]string{
  414. "uuid": "",
  415. "uploaded": "0",
  416. "uploadID": "",
  417. "chunks": "",
  418. })
  419. } else {
  420. ctx.ServerError("GetFileChunkByMD5", err)
  421. }
  422. return
  423. }
  424. isExist := false
  425. if typeCloudBrain == models.TypeCloudBrainOne {
  426. isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
  427. if err != nil {
  428. ctx.ServerError("HasObject failed", err)
  429. return
  430. }
  431. } else {
  432. isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileChunk.UUID)
  433. if err != nil {
  434. ctx.ServerError("ObsHasObject failed", err)
  435. return
  436. }
  437. }
  438. if isExist {
  439. if fileChunk.IsUploaded == models.FileNotUploaded {
  440. log.Info("the file has been uploaded but not recorded")
  441. fileChunk.IsUploaded = models.FileUploaded
  442. if err = models.UpdateFileChunk(fileChunk); err != nil {
  443. log.Error("UpdateFileChunk failed:", err.Error())
  444. }
  445. }
  446. } else {
  447. if fileChunk.IsUploaded == models.FileUploaded {
  448. log.Info("the file has been recorded but not uploaded")
  449. fileChunk.IsUploaded = models.FileNotUploaded
  450. if err = models.UpdateFileChunk(fileChunk); err != nil {
  451. log.Error("UpdateFileChunk failed:", err.Error())
  452. }
  453. }
  454. if typeCloudBrain == models.TypeCloudBrainOne {
  455. chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
  456. if err != nil {
  457. log.Error("GetPartInfos failed:%v", err.Error())
  458. }
  459. } else {
  460. chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
  461. if err != nil {
  462. log.Error("GetObsPartInfos failed:%v", err.Error())
  463. }
  464. }
  465. if err != nil {
  466. models.DeleteFileChunk(fileChunk)
  467. ctx.JSON(200, map[string]string{
  468. "uuid": "",
  469. "uploaded": "0",
  470. "uploadID": "",
  471. "chunks": "",
  472. })
  473. return
  474. }
  475. }
  476. var attachID int64
  477. attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
  478. if err != nil {
  479. if models.IsErrAttachmentNotExist(err) {
  480. attachID = 0
  481. } else {
  482. ctx.ServerError("GetAttachmentByUUID", err)
  483. return
  484. }
  485. } else {
  486. attachID = attach.ID
  487. }
  488. if attach == nil {
  489. ctx.JSON(200, map[string]string{
  490. "uuid": fileChunk.UUID,
  491. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  492. "uploadID": fileChunk.UploadID,
  493. "chunks": string(chunks),
  494. "attachID": "0",
  495. "datasetID": "0",
  496. "fileName": "",
  497. "datasetName": "",
  498. })
  499. return
  500. }
  501. dataset, err := models.GetDatasetByID(attach.DatasetID)
  502. if err != nil {
  503. ctx.ServerError("GetDatasetByID", err)
  504. return
  505. }
  506. ctx.JSON(200, map[string]string{
  507. "uuid": fileChunk.UUID,
  508. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  509. "uploadID": fileChunk.UploadID,
  510. "chunks": string(chunks),
  511. "attachID": strconv.Itoa(int(attachID)),
  512. "datasetID": strconv.Itoa(int(attach.DatasetID)),
  513. "fileName": attach.Name,
  514. "datasetName": dataset.Title,
  515. })
  516. }
// NewMultipart starts a resumable multipart upload: it validates the file
// type and size limits, opens a multipart session on the storage backend
// (minio for CloudBrain type one, OBS otherwise), records the chunk
// bookkeeping row, and returns the new UUID and upload session ID.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileName := ctx.Query("file_name")
	if setting.Attachment.StoreType == storage.MinioStorageType {
		// Enforce backend limits on part count and total object size.
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}
		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}
		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}
		// Record the session so the upload can be resumed (see GetSuccessChunks).
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
// GetMultipartUploadUrl returns a presigned URL for uploading one part of an
// ongoing multipart upload (minio for CloudBrain type one, OBS otherwise).
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects sizes larger than MinPartSize, which
		// reads oddly against the constant's name ("Min") — confirm the
		// intended per-chunk size limit.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}
		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
			return
		}
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
  616. func GetObsKey(ctx *context.Context) {
  617. uuid := gouuid.NewV4().String()
  618. key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
  619. ctx.JSON(200, map[string]string{
  620. "uuid": uuid,
  621. "key": key,
  622. "access_key_id": setting.AccessKeyID,
  623. "secret_access_key": setting.SecretAccessKey,
  624. "server": setting.Endpoint,
  625. "bucket": setting.Bucket,
  626. })
  627. }
  628. func CompleteMultipart(ctx *context.Context) {
  629. uuid := ctx.Query("uuid")
  630. uploadID := ctx.Query("uploadID")
  631. typeCloudBrain := ctx.QueryInt("type")
  632. fileName := ctx.Query("file_name")
  633. err := checkTypeCloudBrain(typeCloudBrain)
  634. if err != nil {
  635. ctx.ServerError("checkTypeCloudBrain failed", err)
  636. return
  637. }
  638. fileChunk, err := models.GetFileChunkByUUID(uuid)
  639. if err != nil {
  640. if models.IsErrFileChunkNotExist(err) {
  641. ctx.Error(404)
  642. } else {
  643. ctx.ServerError("GetFileChunkByUUID", err)
  644. }
  645. return
  646. }
  647. if typeCloudBrain == models.TypeCloudBrainOne {
  648. _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
  649. if err != nil {
  650. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  651. return
  652. }
  653. } else {
  654. err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
  655. if err != nil {
  656. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  657. return
  658. }
  659. }
  660. fileChunk.IsUploaded = models.FileUploaded
  661. err = models.UpdateFileChunk(fileChunk)
  662. if err != nil {
  663. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  664. return
  665. }
  666. attachment, err := models.InsertAttachment(&models.Attachment{
  667. UUID: uuid,
  668. UploaderID: ctx.User.ID,
  669. IsPrivate: true,
  670. Name: fileName,
  671. Size: ctx.QueryInt64("size"),
  672. DatasetID: ctx.QueryInt64("dataset_id"),
  673. Type: typeCloudBrain,
  674. })
  675. if err != nil {
  676. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  677. return
  678. }
  679. if attachment.DatasetID != 0 {
  680. if isCanDecompress(attachment.Name) {
  681. if typeCloudBrain == models.TypeCloudBrainOne {
  682. err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
  683. if err != nil {
  684. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  685. } else {
  686. attachment.DecompressState = models.DecompressStateIng
  687. err = models.UpdateAttachment(attachment)
  688. if err != nil {
  689. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  690. }
  691. }
  692. }
  693. if typeCloudBrain == models.TypeCloudBrainTwo {
  694. attachjson, _ := json.Marshal(attachment)
  695. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  696. }
  697. } else {
  698. dataset, _ := models.GetDatasetByID(attachment.DatasetID)
  699. var labelMap map[string]string
  700. labelMap = make(map[string]string)
  701. labelMap["UUID"] = uuid
  702. labelMap["Type"] = fmt.Sprint(attachment.Type)
  703. labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
  704. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  705. labelMap["AttachName"] = attachment.Name
  706. attachjson, _ := json.Marshal(labelMap)
  707. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  708. }
  709. }
  710. ctx.JSON(200, map[string]string{
  711. "result_code": "0",
  712. })
  713. }
  714. func UpdateMultipart(ctx *context.Context) {
  715. uuid := ctx.Query("uuid")
  716. partNumber := ctx.QueryInt("chunkNumber")
  717. etag := ctx.Query("etag")
  718. fileChunk, err := models.GetFileChunkByUUID(uuid)
  719. if err != nil {
  720. if models.IsErrFileChunkNotExist(err) {
  721. ctx.Error(404)
  722. } else {
  723. ctx.ServerError("GetFileChunkByUUID", err)
  724. }
  725. return
  726. }
  727. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  728. err = models.UpdateFileChunk(fileChunk)
  729. if err != nil {
  730. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  731. return
  732. }
  733. ctx.JSON(200, map[string]string{
  734. "result_code": "0",
  735. })
  736. }
  737. func HandleUnDecompressAttachment() {
  738. attachs, err := models.GetUnDecompressAttachments()
  739. if err != nil {
  740. log.Error("GetUnDecompressAttachments failed:", err.Error())
  741. return
  742. }
  743. for _, attach := range attachs {
  744. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  745. if err != nil {
  746. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  747. } else {
  748. attach.DecompressState = models.DecompressStateIng
  749. err = models.UpdateAttachment(attach)
  750. if err != nil {
  751. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  752. }
  753. }
  754. }
  755. return
  756. }
  757. func QueryAllPublicDataset(ctx *context.Context) {
  758. attachs, err := models.GetAllPublicAttachments()
  759. if err != nil {
  760. ctx.JSON(200, map[string]string{
  761. "result_code": "-1",
  762. "error_msg": err.Error(),
  763. "data": "",
  764. })
  765. return
  766. }
  767. queryDatasets(ctx, attachs)
  768. }
  769. func QueryPrivateDataset(ctx *context.Context) {
  770. username := ctx.Params(":username")
  771. attachs, err := models.GetPrivateAttachments(username)
  772. if err != nil {
  773. ctx.JSON(200, map[string]string{
  774. "result_code": "-1",
  775. "error_msg": err.Error(),
  776. "data": "",
  777. })
  778. return
  779. }
  780. for _, attach := range attachs {
  781. attach.Name = username
  782. }
  783. queryDatasets(ctx, attachs)
  784. }
  785. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  786. var datasets []CloudBrainDataset
  787. if len(attachs) == 0 {
  788. log.Info("dataset is null")
  789. ctx.JSON(200, map[string]string{
  790. "result_code": "0",
  791. "error_msg": "",
  792. "data": "",
  793. })
  794. return
  795. }
  796. for _, attch := range attachs {
  797. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  798. if err != nil || !has {
  799. continue
  800. }
  801. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  802. attch.Attachment.Name,
  803. setting.Attachment.Minio.RealPath +
  804. setting.Attachment.Minio.Bucket + "/" +
  805. setting.Attachment.Minio.BasePath +
  806. models.AttachmentRelativePath(attch.UUID) +
  807. attch.UUID,
  808. attch.Name,
  809. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  810. }
  811. data, err := json.Marshal(datasets)
  812. if err != nil {
  813. log.Error("json.Marshal failed:", err.Error())
  814. ctx.JSON(200, map[string]string{
  815. "result_code": "-1",
  816. "error_msg": err.Error(),
  817. "data": "",
  818. })
  819. return
  820. }
  821. ctx.JSON(200, map[string]string{
  822. "result_code": "0",
  823. "error_msg": "",
  824. "data": string(data),
  825. })
  826. return
  827. }
  828. func checkTypeCloudBrain(typeCloudBrain int) error {
  829. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  830. log.Error("type error:", typeCloudBrain)
  831. return errors.New("type error")
  832. }
  833. return nil
  834. }