You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

attachment.go 27 kB

4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)
  27. const (
  28. //result of decompress
  29. DecompressSuccess = "0"
  30. DecompressFailed = "1"
  31. )
  32. type CloudBrainDataset struct {
  33. UUID string `json:"id"`
  34. Name string `json:"name"`
  35. Path string `json:"place"`
  36. UserName string `json:"provider"`
  37. CreateTime string `json:"created_at"`
  38. }
  39. type UploadForm struct {
  40. UploadID string `form:"uploadId"`
  41. UuID string `form:"uuid"`
  42. PartSize int64 `form:"size"`
  43. Offset int64 `form:"offset"`
  44. PartNumber int `form:"chunkNumber"`
  45. PartFile multipart.File `form:"file"`
  46. }
// RenderAttachmentSettings is the exported entry point used by routes to
// publish the attachment configuration into the template context. It
// simply delegates to the unexported helper below.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
  50. func renderAttachmentSettings(ctx *context.Context) {
  51. ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
  52. ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
  53. ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
  54. ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
  55. ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
  56. }
// UploadAttachment response for uploading issue's attachment
// It validates the sniffed content type against the allowed list, then
// creates the attachment record and stores the file.
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()
	// Sniff up to the first 1 KiB for content-type validation. A read
	// error here is deliberately ignored and treated as "no data read".
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// A missing/malformed dataset_id silently becomes 0 (no dataset).
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	// Both the sniffed prefix and the remaining reader are passed on —
	// presumably NewAttachment concatenates them to store the whole file.
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}
	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  95. func UpdatePublicAttachment(ctx *context.Context) {
  96. file := ctx.Query("file")
  97. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  98. attach, err := models.GetAttachmentByUUID(file)
  99. if err != nil {
  100. ctx.Error(404, err.Error())
  101. return
  102. }
  103. attach.IsPrivate = isPrivate
  104. models.UpdateAttachment(attach)
  105. }
  106. // DeleteAttachment response for deleting issue's attachment
  107. func DeleteAttachment(ctx *context.Context) {
  108. file := ctx.Query("file")
  109. attach, err := models.GetAttachmentByUUID(file)
  110. if err != nil {
  111. ctx.Error(400, err.Error())
  112. return
  113. }
  114. //issue 214: mod del-dataset permission
  115. if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
  116. ctx.Error(403)
  117. return
  118. }
  119. err = models.DeleteAttachment(attach, true)
  120. if err != nil {
  121. ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
  122. return
  123. }
  124. attachjson, _ := json.Marshal(attach)
  125. labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
  126. DeleteAllUnzipFile(attach, "")
  127. _, err = models.DeleteFileChunkById(attach.UUID)
  128. if err != nil {
  129. ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
  130. return
  131. }
  132. ctx.JSON(200, map[string]string{
  133. "uuid": attach.UUID,
  134. })
  135. }
  136. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  137. dataset, err := models.GetDatasetByID(attach.DatasetID)
  138. if err != nil {
  139. log.Info("query dataset error")
  140. } else {
  141. repo, err := models.GetRepositoryByID(dataset.RepoID)
  142. if err != nil {
  143. log.Info("query repo error.")
  144. } else {
  145. repo.GetOwner()
  146. if ctx.User != nil {
  147. if repo.Owner.IsOrganization() {
  148. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  149. log.Info("org user may visit the attach.")
  150. return true
  151. }
  152. }
  153. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  154. if isCollaborator {
  155. log.Info("Collaborator user may visit the attach.")
  156. return true
  157. }
  158. }
  159. }
  160. }
  161. return false
  162. }
// GetAttachment serve attachements
// It resolves the attachment's linked repository/dataset, enforces the
// matching permission model, then either redirects to a presigned
// storage URL or streams the file to the client.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}
	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	// A dataset attachment with no directly linked repository is checked
	// against the dataset's repository with dataset-unit permissions.
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}
	if repository == nil { //If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		// Private unlinked attachments are only served to the uploader or
		// to org members/collaborators of the dataset's repository.
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	// Dataset downloads additionally require a signed-in user with
	// explicit dataset permission.
	if dataSet != nil {
		if !ctx.IsSigned {
			ctx.SetCookie("redirect_to", setting.AppSubURL+ctx.Req.URL.RequestURI(), 0, setting.AppSubURL)
			ctx.Redirect(setting.AppSubURL + "/user/login")
			return
		} else {
			isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
				return
			}
			if !isPermit {
				ctx.Error(http.StatusNotFound)
				return
			}
		}
	}
	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			// CloudBrainTwo objects live in OBS; go through the proxy
			// endpoint when one is configured.
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if dataSet != nil {
			// NOTE(review): 301 lets browsers cache a redirect to an
			// expiring presigned URL; a temporary redirect (302/307)
			// looks intended here — confirm.
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()
			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		// Non-Minio storage: always stream the file directly.
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  285. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  286. if err := attach.IncreaseDownloadCount(); err != nil {
  287. return err
  288. }
  289. if dataSet != nil {
  290. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  291. return err
  292. }
  293. }
  294. return nil
  295. }
  296. // Get a presigned url for put object
  297. func GetPresignedPutObjectURL(ctx *context.Context) {
  298. if !setting.Attachment.Enabled {
  299. ctx.Error(404, "attachment is not enabled")
  300. return
  301. }
  302. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  303. if err != nil {
  304. ctx.Error(400, err.Error())
  305. return
  306. }
  307. if setting.Attachment.StoreType == storage.MinioStorageType {
  308. uuid := gouuid.NewV4().String()
  309. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  310. if err != nil {
  311. ctx.ServerError("PresignedPutURL", err)
  312. return
  313. }
  314. ctx.JSON(200, map[string]string{
  315. "uuid": uuid,
  316. "url": url,
  317. })
  318. } else {
  319. ctx.Error(404, "storage type is not enabled")
  320. return
  321. }
  322. }
// AddAttachment response for add attachment record
// It verifies the object was actually uploaded to the storage backend
// matching the cloudbrain type, inserts the attachment row, and for
// CloudBrainOne archives enqueues a decompress task.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	uuid := ctx.Query("uuid")
	has := false
	// Type one lives in Minio; everything else is checked against OBS,
	// whose keys include the file name.
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	// Archives attached to a dataset get decompressed asynchronously;
	// failures only log — the attachment itself is already recorded.
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					// Mark in-progress only after the task was queued.
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  385. func isCanDecompress(name string) bool {
  386. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  387. return true
  388. }
  389. return false
  390. }
  391. func UpdateAttachmentDecompressState(ctx *context.Context) {
  392. uuid := ctx.Query("uuid")
  393. result := ctx.Query("result")
  394. attach, err := models.GetAttachmentByUUID(uuid)
  395. if err != nil {
  396. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  397. return
  398. }
  399. if result == DecompressSuccess {
  400. attach.DecompressState = models.DecompressStateDone
  401. } else if result == DecompressFailed {
  402. attach.DecompressState = models.DecompressStateFailed
  403. } else {
  404. log.Error("result is error:", result)
  405. return
  406. }
  407. err = models.UpdateAttachment(attach)
  408. if err != nil {
  409. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  410. return
  411. }
  412. log.Info("start to send msg to labelsystem ")
  413. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  414. var labelMap map[string]string
  415. labelMap = make(map[string]string)
  416. labelMap["UUID"] = uuid
  417. labelMap["Type"] = fmt.Sprint(attach.Type)
  418. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  419. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  420. labelMap["AttachName"] = attach.Name
  421. attachjson, _ := json.Marshal(labelMap)
  422. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  423. log.Info("end to send msg to labelsystem ")
  424. ctx.JSON(200, map[string]string{
  425. "result_code": "0",
  426. })
  427. }
// GetSuccessChunks reports the resumable-upload state for a file
// identified by its MD5 for the current user: whether the object exists
// in storage, which parts were already uploaded, the upload session ID,
// and any existing attachment/dataset metadata.
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// No record: the client should start a brand-new upload.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}
	// Check whether the completed object actually exists in the storage
	// backend matching the cloudbrain type.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		// OBS keys include the file name; prefer the name recorded on an
		// existing attachment over the one supplied in the query.
		oldFileName := fileName
		oldAttachment, _ := models.GetAttachmentByUUID(fileChunk.UUID)
		if oldAttachment != nil {
			oldFileName = oldAttachment.Name
		}
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + oldFileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}
	if isExist {
		// Reconcile bookkeeping: object exists but record says otherwise.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		// Reverse reconciliation: record says uploaded but object is gone.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
		// Fetch the already-uploaded part list so the client can resume.
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		if err != nil {
			// The session is unusable; drop the record and tell the client
			// to start over.
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}
	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}
	if attach == nil {
		// Chunk record exists but no attachment row has been created yet.
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart starts a chunked upload session: validates the file type
// and size limits, opens a multipart upload in the storage backend that
// matches the cloudbrain type, and records a FileChunk row for resume
// bookkeeping. Responds with the new attachment UUID and upload ID.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileName := ctx.Query("file_name")
	// Only the Minio store type supports chunked uploads here; the OBS
	// branch below still keys off typeCloudBrain for the session itself.
	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}
		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}
		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  615. func PutOBSProxyUpload(ctx *context.Context) {
  616. uuid := ctx.Query("uuid")
  617. uploadID := ctx.Query("uploadId")
  618. partNumber := ctx.QueryInt("partNumber")
  619. fileName := ctx.Query("file_name")
  620. RequestBody := ctx.Req.Body()
  621. if RequestBody == nil {
  622. ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
  623. return
  624. }
  625. err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
  626. if err != nil {
  627. log.Info("upload error.")
  628. }
  629. }
  630. func GetOBSProxyDownload(ctx *context.Context) {
  631. uuid := ctx.Query("uuid")
  632. fileName := ctx.Query("file_name")
  633. body, err := storage.ObsDownload(uuid, fileName)
  634. if err != nil {
  635. log.Info("upload error.")
  636. } else {
  637. defer body.Close()
  638. ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
  639. ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
  640. p := make([]byte, 1024)
  641. var readErr error
  642. var readCount int
  643. // 读取对象内容
  644. for {
  645. readCount, readErr = body.Read(p)
  646. if readCount > 0 {
  647. ctx.Resp.Write(p[:readCount])
  648. //fmt.Printf("%s", p[:readCount])
  649. }
  650. if readErr != nil {
  651. break
  652. }
  653. }
  654. }
  655. }
// GetMultipartUploadUrl returns the URL the client should PUT the given
// chunk to: a presigned Minio URL for CloudBrainOne, otherwise either
// the local OBS proxy endpoint (when PROXYURL is set) or a presigned
// OBS URL.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects chunks larger than MinPartSize with a
		// "too big" message — presumably the client's chunk size is fixed
		// at MinPartSize; confirm the bound is intended (a MaxPartSize
		// limit would be the conventional check).
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}
		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			// Route the part upload through this server's OBS proxy.
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
// GetObsKey generates a fresh UUID plus the OBS object key derived from
// it and returns them together with the OBS connection settings.
//
// NOTE(review): this hands the OBS secret access key to the caller —
// anyone who can reach this endpoint obtains bucket credentials. Confirm
// the route is access-restricted; temporary/scoped credentials would be
// the safer design.
func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	// Key layout: <BasePath>/<u[0]>/<u[1]>/<uuid>/<uuid>, leading slash trimmed.
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}
  708. func CompleteMultipart(ctx *context.Context) {
  709. uuid := ctx.Query("uuid")
  710. uploadID := ctx.Query("uploadID")
  711. typeCloudBrain := ctx.QueryInt("type")
  712. fileName := ctx.Query("file_name")
  713. err := checkTypeCloudBrain(typeCloudBrain)
  714. if err != nil {
  715. ctx.ServerError("checkTypeCloudBrain failed", err)
  716. return
  717. }
  718. fileChunk, err := models.GetFileChunkByUUID(uuid)
  719. if err != nil {
  720. if models.IsErrFileChunkNotExist(err) {
  721. ctx.Error(404)
  722. } else {
  723. ctx.ServerError("GetFileChunkByUUID", err)
  724. }
  725. return
  726. }
  727. if typeCloudBrain == models.TypeCloudBrainOne {
  728. _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
  729. if err != nil {
  730. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  731. return
  732. }
  733. } else {
  734. err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
  735. if err != nil {
  736. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  737. return
  738. }
  739. }
  740. fileChunk.IsUploaded = models.FileUploaded
  741. err = models.UpdateFileChunk(fileChunk)
  742. if err != nil {
  743. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  744. return
  745. }
  746. attachment, err := models.InsertAttachment(&models.Attachment{
  747. UUID: uuid,
  748. UploaderID: ctx.User.ID,
  749. IsPrivate: true,
  750. Name: fileName,
  751. Size: ctx.QueryInt64("size"),
  752. DatasetID: ctx.QueryInt64("dataset_id"),
  753. Type: typeCloudBrain,
  754. })
  755. if err != nil {
  756. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  757. return
  758. }
  759. dataset, _ := models.GetDatasetByID(attachment.DatasetID)
  760. repository, _ := models.GetRepositoryByID(dataset.RepoID)
  761. notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)
  762. if attachment.DatasetID != 0 {
  763. if isCanDecompress(attachment.Name) {
  764. if typeCloudBrain == models.TypeCloudBrainOne {
  765. err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
  766. if err != nil {
  767. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  768. } else {
  769. attachment.DecompressState = models.DecompressStateIng
  770. err = models.UpdateAttachment(attachment)
  771. if err != nil {
  772. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  773. }
  774. }
  775. }
  776. if typeCloudBrain == models.TypeCloudBrainTwo {
  777. attachjson, _ := json.Marshal(attachment)
  778. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  779. }
  780. } else {
  781. var labelMap map[string]string
  782. labelMap = make(map[string]string)
  783. labelMap["UUID"] = uuid
  784. labelMap["Type"] = fmt.Sprint(attachment.Type)
  785. labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
  786. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  787. labelMap["AttachName"] = attachment.Name
  788. attachjson, _ := json.Marshal(labelMap)
  789. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  790. }
  791. }
  792. ctx.JSON(200, map[string]string{
  793. "result_code": "0",
  794. })
  795. }
  796. func UpdateMultipart(ctx *context.Context) {
  797. uuid := ctx.Query("uuid")
  798. partNumber := ctx.QueryInt("chunkNumber")
  799. etag := ctx.Query("etag")
  800. fileChunk, err := models.GetFileChunkByUUID(uuid)
  801. if err != nil {
  802. if models.IsErrFileChunkNotExist(err) {
  803. ctx.Error(404)
  804. } else {
  805. ctx.ServerError("GetFileChunkByUUID", err)
  806. }
  807. return
  808. }
  809. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  810. err = models.UpdateFileChunk(fileChunk)
  811. if err != nil {
  812. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  813. return
  814. }
  815. ctx.JSON(200, map[string]string{
  816. "result_code": "0",
  817. })
  818. }
  819. func HandleUnDecompressAttachment() {
  820. attachs, err := models.GetUnDecompressAttachments()
  821. if err != nil {
  822. log.Error("GetUnDecompressAttachments failed:", err.Error())
  823. return
  824. }
  825. for _, attach := range attachs {
  826. if attach.Type == models.TypeCloudBrainOne {
  827. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  828. if err != nil {
  829. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  830. } else {
  831. attach.DecompressState = models.DecompressStateIng
  832. err = models.UpdateAttachment(attach)
  833. if err != nil {
  834. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  835. }
  836. }
  837. } else if attach.Type == models.TypeCloudBrainTwo {
  838. attachjson, _ := json.Marshal(attach)
  839. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  840. }
  841. }
  842. return
  843. }
  844. func QueryAllPublicDataset(ctx *context.Context) {
  845. attachs, err := models.GetAllPublicAttachments()
  846. if err != nil {
  847. ctx.JSON(200, map[string]string{
  848. "result_code": "-1",
  849. "error_msg": err.Error(),
  850. "data": "",
  851. })
  852. return
  853. }
  854. queryDatasets(ctx, attachs)
  855. }
  856. func QueryPrivateDataset(ctx *context.Context) {
  857. username := ctx.Params(":username")
  858. attachs, err := models.GetPrivateAttachments(username)
  859. if err != nil {
  860. ctx.JSON(200, map[string]string{
  861. "result_code": "-1",
  862. "error_msg": err.Error(),
  863. "data": "",
  864. })
  865. return
  866. }
  867. for _, attach := range attachs {
  868. attach.Name = username
  869. }
  870. queryDatasets(ctx, attachs)
  871. }
  872. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  873. var datasets []CloudBrainDataset
  874. if len(attachs) == 0 {
  875. log.Info("dataset is null")
  876. ctx.JSON(200, map[string]string{
  877. "result_code": "0",
  878. "error_msg": "",
  879. "data": "",
  880. })
  881. return
  882. }
  883. for _, attch := range attachs {
  884. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  885. if err != nil || !has {
  886. continue
  887. }
  888. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  889. attch.Attachment.Name,
  890. setting.Attachment.Minio.RealPath +
  891. setting.Attachment.Minio.Bucket + "/" +
  892. setting.Attachment.Minio.BasePath +
  893. models.AttachmentRelativePath(attch.UUID) +
  894. attch.UUID,
  895. attch.Name,
  896. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  897. }
  898. data, err := json.Marshal(datasets)
  899. if err != nil {
  900. log.Error("json.Marshal failed:", err.Error())
  901. ctx.JSON(200, map[string]string{
  902. "result_code": "-1",
  903. "error_msg": err.Error(),
  904. "data": "",
  905. })
  906. return
  907. }
  908. ctx.JSON(200, map[string]string{
  909. "result_code": "0",
  910. "error_msg": "",
  911. "data": string(data),
  912. })
  913. return
  914. }
  915. func checkTypeCloudBrain(typeCloudBrain int) error {
  916. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  917. log.Error("type error:", typeCloudBrain)
  918. return errors.New("type error")
  919. }
  920. return nil
  921. }