
attachment.go 26 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)

type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}

type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
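
// RenderAttachmentSettings exposes the attachment settings to the template context.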
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}

// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}

	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
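
// UpdatePublicAttachment updates the is_private flag of the attachment named by the "file" query parameter.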
func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}

	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}

// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}

	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
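
// DownloadUserIsOrgOrCollaboration reports whether the current user may access the attachment
// as a member of the owning organization or as a collaborator of the linked repository.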
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}
				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}
	return false
}

// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { // If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { // We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	// If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		log.Info("go here to download.")
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
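
// increaseDownloadCount bumps the download counter of the attachment and, if set, of its dataset.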
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}

	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}

	return nil
}

// GetPresignedPutObjectURL returns a presigned url for putting an object
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}

// AddAttachment response for adding an attachment record
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       ctx.Query("file_name"),
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			// todo: decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
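
// isCanDecompress reports whether the file name carries an archive suffix the decompress worker supports.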
func isCanDecompress(name string) bool {
	if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
		return true
	}
	return false
}
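
// UpdateAttachmentDecompressState records the result of a decompress task and notifies the label system.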
func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")

	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}

	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error: %s", result)
		return
	}

	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}

	log.Info("start to send msg to labelsystem")
	dataset, _ := models.GetDatasetByID(attach.DatasetID)

	labelMap := make(map[string]string)
	labelMap["UUID"] = uuid
	labelMap["Type"] = fmt.Sprint(attach.Type)
	labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
	labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
	labelMap["AttachName"] = attach.Name
	attachjson, _ := json.Marshal(labelMap)
	labelmsg.SendAddAttachToLabelSys(string(attachjson))
	log.Info("end to send msg to labelsystem")

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
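
// GetSuccessChunks returns the upload state and already-uploaded parts of a chunked upload,
// looked up by the file MD5 of the current user.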
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileChunk.UUID)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}

		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
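
// NewMultipart initializes a multipart upload (MinIO or OBS) and records a FileChunk entry for it.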
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
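
// PutOBSProxyUpload proxies a single part of a multipart upload to OBS; it is the target of the
// /obs_proxy_multipart URL returned when PROXYURL is configured.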
func PutOBSProxyUpload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadId")
	partNumber := ctx.QueryInt("partNumber")
	fileName := ctx.Query("file_name")

	RequestBody := ctx.Req.Body()
	if RequestBody == nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
		return
	}

	err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
	if err != nil {
		log.Info("upload error.")
	}
}
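
// GetOBSProxyDownload streams an object from OBS back to the client as an attachment download.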
func GetOBSProxyDownload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	fileName := ctx.Query("file_name")

	body, err := storage.ObsDownload(uuid, fileName)
	if err != nil {
		log.Info("download error.")
	} else {
		defer body.Close()

		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")

		p := make([]byte, 1024)
		var readErr error
		var readCount int
		// read the object content and stream it to the response
		for {
			readCount, readErr = body.Read(p)
			if readCount > 0 {
				ctx.Resp.Write(p[:readCount])
				//fmt.Printf("%s", p[:readCount])
			}
			if readErr != nil {
				break
			}
		}
	}
}
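
// GetMultipartUploadUrl returns a signed (or proxy) URL for uploading one chunk of a multipart upload.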
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
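
// GetObsKey generates a fresh object key under the configured OBS base path and returns it
// together with the bucket connection settings.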
func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}
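
// CompleteMultipart finishes the multipart upload, marks the file chunk as uploaded, inserts the
// attachment record and, where applicable, schedules decompression or notifies the label system.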
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			if typeCloudBrain == models.TypeCloudBrainTwo {
				attachjson, _ := json.Marshal(attachment)
				labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			}
		} else {
			dataset, _ := models.GetDatasetByID(attachment.DatasetID)

			labelMap := make(map[string]string)
			labelMap["UUID"] = uuid
			labelMap["Type"] = fmt.Sprint(attachment.Type)
			labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
			labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
			labelMap["AttachName"] = attachment.Name
			attachjson, _ := json.Marshal(labelMap)
			labelmsg.SendAddAttachToLabelSys(string(attachjson))
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
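
// UpdateMultipart appends the etag of a completed part to the file chunk record.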
func UpdateMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	partNumber := ctx.QueryInt("chunkNumber")
	etag := ctx.Query("etag")

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
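
// HandleUnDecompressAttachment re-sends decompress tasks for attachments that are still waiting to be decompressed.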
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed: %s", err.Error())
		return
	}

	for _, attach := range attachs {
		err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
		if err != nil {
			log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
		} else {
			attach.DecompressState = models.DecompressStateIng
			err = models.UpdateAttachment(attach)
			if err != nil {
				log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
			}
		}
	}

	return
}
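
// QueryAllPublicDataset lists all public attachments as cloud-brain datasets.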
func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	queryDatasets(ctx, attachs)
}
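
// QueryPrivateDataset lists the private attachments of the given user as cloud-brain datasets.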
func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	for _, attach := range attachs {
		attach.Name = username
	}

	queryDatasets(ctx, attachs)
}
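
// queryDatasets converts the attachments into CloudBrainDataset entries, skipping any whose object is missing from storage.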
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}

	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}

		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}

	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed: %s", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})

	return
}
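
// checkTypeCloudBrain validates the cloud-brain type passed in the request.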
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainNotebook {
		log.Error("type error: %d", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}