
attachment.go 29 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
    contexExt "context"
    "encoding/json"
    "errors"
    "fmt"
    "mime/multipart"
    "net/http"
    "path"
    "strconv"
    "strings"

    "code.gitea.io/gitea/models"
    "code.gitea.io/gitea/modules/auth"
    "code.gitea.io/gitea/modules/base"
    "code.gitea.io/gitea/modules/context"
    "code.gitea.io/gitea/modules/labelmsg"
    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/minio_ext"
    "code.gitea.io/gitea/modules/notification"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/storage"
    "code.gitea.io/gitea/modules/upload"
    "code.gitea.io/gitea/modules/worker"

    gouuid "github.com/satori/go.uuid"
)
const (
    // result of decompress
    DecompressSuccess = "0"
    DecompressFailed  = "1"

    tplAttachmentUpload base.TplName = "repo/attachment/upload"
    tplAttachmentEdit   base.TplName = "repo/attachment/edit"
)
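// CloudBrainDataset is the JSON payload that describes an attachment when it is
// exposed as a dataset entry to the CloudBrain service.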
type CloudBrainDataset struct {
    UUID       string `json:"id"`
    Name       string `json:"name"`
    Path       string `json:"place"`
    UserName   string `json:"provider"`
    CreateTime string `json:"created_at"`
}
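// UploadForm carries the chunked-upload parameters posted by the web uploader.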
type UploadForm struct {
    UploadID   string         `form:"uploadId"`
    UuID       string         `form:"uuid"`
    PartSize   int64          `form:"size"`
    Offset     int64          `form:"offset"`
    PartNumber int            `form:"chunkNumber"`
    PartFile   multipart.File `form:"file"`
}
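// RenderAttachmentSettings copies the attachment settings into the template context.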
func RenderAttachmentSettings(ctx *context.Context) {
    renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
    ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
    ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
    ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
    ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
    ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
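// UploadAttachmentUI renders the upload page for a dataset's attachments.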
func UploadAttachmentUI(ctx *context.Context) {
    ctx.Data["datasetId"] = ctx.Query("datasetId")
    dataset, _ := models.GetDatasetByID(ctx.QueryInt64("datasetId"))
    if dataset == nil {
        ctx.Error(404, "The dataset does not exist.")
        return
    }
    r, _ := models.GetRepositoryByID(dataset.RepoID)
    ctx.Data["Repo"] = r
    ctx.HTML(200, tplAttachmentUpload)
}
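// EditAttachmentUI renders the edit page for a single attachment.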
func EditAttachmentUI(ctx *context.Context) {
    id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
    attachment, _ := models.GetAttachmentByID(id)
    if attachment == nil {
        ctx.Error(404, "The attachment does not exist.")
        return
    }
    dataset, _ := models.GetDatasetByID(attachment.DatasetID)
    r, _ := models.GetRepositoryByID(dataset.RepoID)
    ctx.Data["Repo"] = r
    ctx.Data["Attachment"] = attachment
    ctx.HTML(200, tplAttachmentEdit)
}
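// EditAttachment updates the description of an attachment.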
func EditAttachment(ctx *context.Context, form auth.EditAttachmentForm) {
    err := models.UpdateAttachmentDescription(&models.Attachment{
        ID:          form.ID,
        Description: form.Description,
    })
    if err != nil {
        ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.edit_attachment_fail")))
        return
    }
    ctx.JSON(http.StatusOK, models.BaseOKMessage)
}
// UploadAttachment handles uploading an issue's attachment.
func UploadAttachment(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    file, header, err := ctx.Req.FormFile("file")
    if err != nil {
        ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
        return
    }
    defer file.Close()

    // Read the first 1KB of the file to verify its content type.
    buf := make([]byte, 1024)
    n, _ := file.Read(buf)
    if n > 0 {
        buf = buf[:n]
    }
    err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
    attach, err := models.NewAttachment(&models.Attachment{
        IsPrivate:  true,
        UploaderID: ctx.User.ID,
        Name:       header.Filename,
        DatasetID:  datasetID,
    }, buf, file)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
        return
    }

    log.Trace("New attachment uploaded: %s", attach.UUID)
    ctx.JSON(200, map[string]string{
        "uuid": attach.UUID,
    })
}
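// UpdatePublicAttachment sets the is_private flag of an attachment from the request.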
func UpdatePublicAttachment(ctx *context.Context) {
    file := ctx.Query("file")
    isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
    attach, err := models.GetAttachmentByUUID(file)
    if err != nil {
        ctx.Error(404, err.Error())
        return
    }

    attach.IsPrivate = isPrivate
    if err = models.UpdateAttachment(attach); err != nil {
        ctx.Error(500, fmt.Sprintf("UpdateAttachment: %v", err))
        return
    }
}
// DeleteAttachment handles deleting an issue's attachment.
func DeleteAttachment(ctx *context.Context) {
    file := ctx.Query("file")
    attach, err := models.GetAttachmentByUUID(file)
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }
    // issue 214: mod del-dataset permission
    if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
        ctx.Error(403)
        return
    }
    err = models.DeleteAttachment(attach, true)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
        return
    }

    attachjson, _ := json.Marshal(attach)
    labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

    DeleteAllUnzipFile(attach, "")
    _, err = models.DeleteFileChunkById(attach.UUID)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
        return
    }
    ctx.JSON(200, map[string]string{
        "uuid": attach.UUID,
    })
}
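// DownloadUserIsOrgOrCollaboration reports whether the current user may download the
// attachment because they belong to the owning organization or collaborate on the repository.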
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
    dataset, err := models.GetDatasetByID(attach.DatasetID)
    if err != nil {
        log.Info("query dataset error")
    } else {
        repo, err := models.GetRepositoryByID(dataset.RepoID)
        if err != nil {
            log.Info("query repo error.")
        } else {
            repo.GetOwner()
            if ctx.User != nil {
                if repo.Owner.IsOrganization() {
                    if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
                        log.Info("org user may visit the attach.")
                        return true
                    }
                }
                isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
                if isCollaborator {
                    log.Info("Collaborator user may visit the attach.")
                    return true
                }
            }
        }
    }
    return false
}
// GetAttachment serves attachments.
func GetAttachment(ctx *context.Context) {
    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
    if err != nil {
        if models.IsErrAttachmentNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetAttachmentByUUID", err)
        }
        return
    }

    repository, unitType, err := attach.LinkedRepository()
    if err != nil {
        ctx.ServerError("LinkedRepository", err)
        return
    }

    dataSet, err := attach.LinkedDataSet()
    if err != nil {
        ctx.ServerError("LinkedDataSet", err)
        return
    }

    if repository == nil && dataSet != nil {
        repository, _ = models.GetRepositoryByID(dataSet.RepoID)
        unitType = models.UnitTypeDatasets
    }

    if repository == nil { // If not linked
        // if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { // We block if not the uploader
        // log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
        if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
            ctx.Error(http.StatusNotFound)
            return
        }
    } else { // If we have the repository we check access
        perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
        if errPermission != nil {
            ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
            return
        }
        if !perm.CanRead(unitType) {
            ctx.Error(http.StatusNotFound)
            return
        }
    }

    if dataSet != nil {
        isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
        if err != nil {
            ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
            return
        }
        if !isPermit {
            ctx.Error(http.StatusNotFound)
            return
        }
    }

    // If we have matched and have access to the release or issue
    if setting.Attachment.StoreType == storage.MinioStorageType {
        url := ""
        if typeCloudBrain == models.TypeCloudBrainOne {
            url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
            if err != nil {
                ctx.ServerError("PresignedGetURL", err)
                return
            }
        } else {
            if setting.PROXYURL != "" {
                url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
                log.Info("return url=" + url)
            } else {
                url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
                if err != nil {
                    ctx.ServerError("ObsGetPreSignedUrl", err)
                    return
                }
            }
        }
        if err = increaseDownloadCount(attach, dataSet); err != nil {
            ctx.ServerError("Update", err)
            return
        }

        if dataSet != nil {
            http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
        } else {
            fr, err := storage.Attachments.Open(attach.RelativePath())
            if err != nil {
                ctx.ServerError("Open", err)
                return
            }
            defer fr.Close()

            if err = ServeData(ctx, attach.Name, fr); err != nil {
                ctx.ServerError("ServeData", err)
                return
            }
        }
    } else {
        fr, err := storage.Attachments.Open(attach.RelativePath())
        if err != nil {
            ctx.ServerError("Open", err)
            return
        }
        defer fr.Close()

        if err = increaseDownloadCount(attach, dataSet); err != nil {
            ctx.ServerError("Update", err)
            return
        }

        if err = ServeData(ctx, attach.Name, fr); err != nil {
            ctx.ServerError("ServeData", err)
            return
        }
    }
}
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
    if err := attach.IncreaseDownloadCount(); err != nil {
        return err
    }

    if dataSet != nil {
        if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
            return err
        }
    }

    return nil
}
// GetPresignedPutObjectURL returns a presigned URL for putting an object.
func GetPresignedPutObjectURL(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    if setting.Attachment.StoreType == storage.MinioStorageType {
        uuid := gouuid.NewV4().String()
        url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
        if err != nil {
            ctx.ServerError("PresignedPutURL", err)
            return
        }
        ctx.JSON(200, map[string]string{
            "uuid": uuid,
            "url":  url,
        })
    } else {
        ctx.Error(404, "storage type is not enabled")
        return
    }
}
// AddAttachment adds an attachment record for a file that has already been uploaded.
func AddAttachment(ctx *context.Context) {
    typeCloudBrain := ctx.QueryInt("type")
    fileName := ctx.Query("file_name")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    uuid := ctx.Query("uuid")
    has := false
    if typeCloudBrain == models.TypeCloudBrainOne {
        has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
        if err != nil {
            ctx.ServerError("HasObject", err)
            return
        }
    } else {
        has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
        if err != nil {
            ctx.ServerError("ObsHasObject", err)
            return
        }
    }

    if !has {
        ctx.Error(404, "attachment has not been uploaded")
        return
    }

    datasetId := ctx.QueryInt64("dataset_id")
    dataset, err := models.GetDatasetByID(datasetId)
    if err != nil {
        ctx.Error(404, "dataset does not exist.")
        return
    }

    attachment, err := models.InsertAttachment(&models.Attachment{
        UUID:       uuid,
        UploaderID: ctx.User.ID,
        IsPrivate:  dataset.IsPrivate(),
        Name:       fileName,
        Size:       ctx.QueryInt64("size"),
        DatasetID:  ctx.QueryInt64("dataset_id"),
        Type:       typeCloudBrain,
    })
    if err != nil {
        ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
        return
    }

    if attachment.DatasetID != 0 {
        if isCanDecompress(attachment.Name) {
            if typeCloudBrain == models.TypeCloudBrainOne {
                err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
                if err != nil {
                    log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
                } else {
                    attachment.DecompressState = models.DecompressStateIng
                    err = models.UpdateAttachment(attachment)
                    if err != nil {
                        log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
                    }
                }
            }
            // todo: decompress type_two
        }
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}
func isCanDecompress(name string) bool {
    if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
        return true
    }
    return false
}
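// UpdateAttachmentDecompressState records the result reported by the decompression worker
// and forwards the attachment metadata to the label system.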
func UpdateAttachmentDecompressState(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    result := ctx.Query("result")

    attach, err := models.GetAttachmentByUUID(uuid)
    if err != nil {
        log.Error("GetAttachmentByUUID(%s) failed: %s", uuid, err.Error())
        return
    }

    if result == DecompressSuccess {
        attach.DecompressState = models.DecompressStateDone
    } else if result == DecompressFailed {
        attach.DecompressState = models.DecompressStateFailed
    } else {
        log.Error("result is error: %s", result)
        return
    }

    err = models.UpdateAttachment(attach)
    if err != nil {
        log.Error("UpdateAttachment(%s) failed: %s", uuid, err.Error())
        return
    }

    log.Info("start to send msg to labelsystem")
    dataset, _ := models.GetDatasetByID(attach.DatasetID)

    labelMap := make(map[string]string)
    labelMap["UUID"] = uuid
    labelMap["Type"] = fmt.Sprint(attach.Type)
    labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
    labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
    labelMap["AttachName"] = attach.Name
    attachjson, _ := json.Marshal(labelMap)
    labelmsg.SendAddAttachToLabelSys(string(attachjson))
    log.Info("end to send msg to labelsystem")

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}
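// GetSuccessChunks looks up a previous chunked upload by file MD5 and reports which
// parts already exist, so the client can resume instead of re-uploading the whole file.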
func GetSuccessChunks(ctx *context.Context) {
    fileMD5 := ctx.Query("md5")
    typeCloudBrain := ctx.QueryInt("type")
    fileName := ctx.Query("file_name")
    var chunks string

    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.JSON(200, map[string]string{
                "uuid":     "",
                "uploaded": "0",
                "uploadID": "",
                "chunks":   "",
            })
        } else {
            ctx.ServerError("GetFileChunkByMD5", err)
        }
        return
    }

    isExist := false
    if typeCloudBrain == models.TypeCloudBrainOne {
        isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
        if err != nil {
            ctx.ServerError("HasObject failed", err)
            return
        }
    } else {
        oldFileName := fileName
        oldAttachment, _ := models.GetAttachmentByUUID(fileChunk.UUID)
        if oldAttachment != nil {
            oldFileName = oldAttachment.Name
        }
        isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + oldFileName)
        if err != nil {
            ctx.ServerError("ObsHasObject failed", err)
            return
        }
    }

    if isExist {
        if fileChunk.IsUploaded == models.FileNotUploaded {
            log.Info("the file has been uploaded but not recorded")
            fileChunk.IsUploaded = models.FileUploaded
            if err = models.UpdateFileChunk(fileChunk); err != nil {
                log.Error("UpdateFileChunk failed: %s", err.Error())
            }
        }
    } else {
        if fileChunk.IsUploaded == models.FileUploaded {
            log.Info("the file has been recorded but not uploaded")
            fileChunk.IsUploaded = models.FileNotUploaded
            if err = models.UpdateFileChunk(fileChunk); err != nil {
                log.Error("UpdateFileChunk failed: %s", err.Error())
            }
        }

        if typeCloudBrain == models.TypeCloudBrainOne {
            chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
            if err != nil {
                log.Error("GetPartInfos failed: %v", err.Error())
            }
        } else {
            chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName)
            if err != nil {
                log.Error("GetObsPartInfos failed: %v", err.Error())
            }
        }
        if err != nil {
            models.DeleteFileChunk(fileChunk)
            ctx.JSON(200, map[string]string{
                "uuid":     "",
                "uploaded": "0",
                "uploadID": "",
                "chunks":   "",
            })
            return
        }
    }

    var attachID int64
    attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
    if err != nil {
        if models.IsErrAttachmentNotExist(err) {
            attachID = 0
        } else {
            ctx.ServerError("GetAttachmentByUUID", err)
            return
        }
    } else {
        attachID = attach.ID
    }

    if attach == nil {
        ctx.JSON(200, map[string]string{
            "uuid":        fileChunk.UUID,
            "uploaded":    strconv.Itoa(fileChunk.IsUploaded),
            "uploadID":    fileChunk.UploadID,
            "chunks":      chunks,
            "attachID":    "0",
            "datasetID":   "0",
            "fileName":    "",
            "datasetName": "",
        })
        return
    }

    dataset, err := models.GetDatasetByID(attach.DatasetID)
    if err != nil {
        ctx.ServerError("GetDatasetByID", err)
        return
    }

    ctx.JSON(200, map[string]string{
        "uuid":        fileChunk.UUID,
        "uploaded":    strconv.Itoa(fileChunk.IsUploaded),
        "uploadID":    fileChunk.UploadID,
        "chunks":      chunks,
        "attachID":    strconv.Itoa(int(attachID)),
        "datasetID":   strconv.Itoa(int(attach.DatasetID)),
        "fileName":    attach.Name,
        "datasetName": dataset.Title,
    })
}
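// NewMultipart initiates a multipart upload and records the file chunk metadata.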
func NewMultipart(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    typeCloudBrain := ctx.QueryInt("type")
    err = checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileName := ctx.Query("file_name")

    if setting.Attachment.StoreType == storage.MinioStorageType {
        totalChunkCounts := ctx.QueryInt("totalChunkCounts")
        if totalChunkCounts > minio_ext.MaxPartsCount {
            ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
            return
        }

        fileSize := ctx.QueryInt64("size")
        if fileSize > minio_ext.MaxMultipartPutObjectSize {
            ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
            return
        }

        uuid := gouuid.NewV4().String()
        var uploadID string
        if typeCloudBrain == models.TypeCloudBrainOne {
            uploadID, err = storage.NewMultiPartUpload(uuid)
            if err != nil {
                ctx.ServerError("NewMultipart", err)
                return
            }
        } else {
            uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
            if err != nil {
                ctx.ServerError("NewObsMultiPartUpload", err)
                return
            }
        }

        _, err = models.InsertFileChunk(&models.FileChunk{
            UUID:        uuid,
            UserID:      ctx.User.ID,
            UploadID:    uploadID,
            Md5:         ctx.Query("md5"),
            Size:        fileSize,
            TotalChunks: totalChunkCounts,
            Type:        typeCloudBrain,
        })
        if err != nil {
            ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
            return
        }

        ctx.JSON(200, map[string]string{
            "uuid":     uuid,
            "uploadID": uploadID,
        })
    } else {
        ctx.Error(404, "storage type is not enabled")
        return
    }
}
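// PutOBSProxyUpload proxies an uploaded part to OBS on behalf of the client.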
func PutOBSProxyUpload(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadId")
    partNumber := ctx.QueryInt("partNumber")
    fileName := ctx.Query("file_name")

    RequestBody := ctx.Req.Body()
    if RequestBody == nil {
        ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
        return
    }

    err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
    if err != nil {
        log.Error("ObsMultiPartUpload failed: %v", err)
    }
}
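// GetOBSProxyDownload streams an object from OBS back to the client as a file download.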
func GetOBSProxyDownload(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    fileName := ctx.Query("file_name")

    body, err := storage.ObsDownload(uuid, fileName)
    if err != nil {
        log.Error("ObsDownload failed: %v", err)
    } else {
        defer body.Close()

        ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
        ctx.Resp.Header().Set("Content-Type", "application/octet-stream")

        p := make([]byte, 1024)
        var readErr error
        var readCount int
        // Read the object content and stream it to the response.
        for {
            readCount, readErr = body.Read(p)
            if readCount > 0 {
                ctx.Resp.Write(p[:readCount])
                // fmt.Printf("%s", p[:readCount])
            }
            if readErr != nil {
                break
            }
        }
    }
}
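// GetMultipartUploadUrl returns the URL the client should use to upload one part of a
// multipart upload: either a presigned storage URL or the OBS proxy endpoint.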
func GetMultipartUploadUrl(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadID")
    partNumber := ctx.QueryInt("chunkNumber")
    size := ctx.QueryInt64("size")
    fileName := ctx.Query("file_name")

    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    url := ""
    if typeCloudBrain == models.TypeCloudBrainOne {
        if size > minio_ext.MinPartSize {
            ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
            return
        }

        url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
            return
        }
    } else {
        if setting.PROXYURL != "" {
            url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
            log.Info("return url=" + url)
        } else {
            url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
            if err != nil {
                ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
                return
            }
            log.Info("url=" + url)
        }
    }

    ctx.JSON(200, map[string]string{
        "url": url,
    })
}
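// GetObsKey generates a fresh object key under the configured OBS base path and returns
// it together with the bucket, endpoint and credential settings for direct upload.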
func GetObsKey(ctx *context.Context) {
    uuid := gouuid.NewV4().String()
    key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

    ctx.JSON(200, map[string]string{
        "uuid":              uuid,
        "key":               key,
        "access_key_id":     setting.AccessKeyID,
        "secret_access_key": setting.SecretAccessKey,
        "server":            setting.Endpoint,
        "bucket":            setting.Bucket,
    })
}
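// CompleteMultipart finishes a multipart upload, creates the attachment record and,
// for archive files, kicks off decompression and label-system notification.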
func CompleteMultipart(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadID")
    typeCloudBrain := ctx.QueryInt("type")
    fileName := ctx.Query("file_name")

    log.Warn("uuid:" + uuid)
    log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))

    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileChunk, err := models.GetFileChunkByUUID(uuid)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetFileChunkByUUID", err)
        }
        return
    }

    if typeCloudBrain == models.TypeCloudBrainOne {
        _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
            return
        }
    } else {
        err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
            return
        }
    }

    fileChunk.IsUploaded = models.FileUploaded

    err = models.UpdateFileChunk(fileChunk)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
        return
    }

    dataset, err := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
    if err != nil {
        ctx.Error(404, "dataset does not exist.")
        return
    }
    log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))

    attachment, err := models.InsertAttachment(&models.Attachment{
        UUID:        uuid,
        UploaderID:  ctx.User.ID,
        IsPrivate:   dataset.IsPrivate(),
        Name:        fileName,
        Size:        ctx.QueryInt64("size"),
        DatasetID:   ctx.QueryInt64("dataset_id"),
        Description: ctx.Query("description"),
        Type:        typeCloudBrain,
    })
    if err != nil {
        ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
        return
    }

    repository, _ := models.GetRepositoryByID(dataset.RepoID)
    notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)

    if attachment.DatasetID != 0 {
        if isCanDecompress(attachment.Name) {
            if typeCloudBrain == models.TypeCloudBrainOne {
                err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
                if err != nil {
                    log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
                } else {
                    attachment.DecompressState = models.DecompressStateIng
                    err = models.UpdateAttachment(attachment)
                    if err != nil {
                        log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
                    }
                }
            }
            if typeCloudBrain == models.TypeCloudBrainTwo {
                attachjson, _ := json.Marshal(attachment)
                labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
            }
        } else {
            labelMap := make(map[string]string)
            labelMap["UUID"] = uuid
            labelMap["Type"] = fmt.Sprint(attachment.Type)
            labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
            labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
            labelMap["AttachName"] = attachment.Name
            attachjson, _ := json.Marshal(labelMap)
            labelmsg.SendAddAttachToLabelSys(string(attachjson))
        }
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}
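// UpdateMultipart records the ETag of a completed part for a chunked upload.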
func UpdateMultipart(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    partNumber := ctx.QueryInt("chunkNumber")
    etag := ctx.Query("etag")

    fileChunk, err := models.GetFileChunkByUUID(uuid)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetFileChunkByUUID", err)
        }
        return
    }

    fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

    err = models.UpdateFileChunk(fileChunk)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
        return
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}
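// HandleUnDecompressAttachment re-dispatches decompression for attachments that are
// still waiting to be decompressed.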
func HandleUnDecompressAttachment() {
    attachs, err := models.GetUnDecompressAttachments()
    if err != nil {
        log.Error("GetUnDecompressAttachments failed: %s", err.Error())
        return
    }

    for _, attach := range attachs {
        if attach.Type == models.TypeCloudBrainOne {
            err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
            if err != nil {
                log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
            } else {
                attach.DecompressState = models.DecompressStateIng
                err = models.UpdateAttachment(attach)
                if err != nil {
                    log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
                }
            }
        } else if attach.Type == models.TypeCloudBrainTwo {
            attachjson, _ := json.Marshal(attach)
            labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
        }
    }
}
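// QueryAllPublicDataset lists every public attachment as a CloudBrain dataset.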
func QueryAllPublicDataset(ctx *context.Context) {
    attachs, err := models.GetAllPublicAttachments()
    if err != nil {
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    queryDatasets(ctx, attachs)
}
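// QueryPrivateDataset lists the private attachments of the given user as CloudBrain datasets.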
func QueryPrivateDataset(ctx *context.Context) {
    username := ctx.Params(":username")
    attachs, err := models.GetPrivateAttachments(username)
    if err != nil {
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    for _, attach := range attachs {
        attach.Name = username
    }

    queryDatasets(ctx, attachs)
}
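// queryDatasets converts attachment records into CloudBrainDataset entries, skipping
// attachments whose objects no longer exist in storage, and writes them as a JSON response.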
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
    var datasets []CloudBrainDataset
    if len(attachs) == 0 {
        log.Info("dataset is null")
        ctx.JSON(200, map[string]string{
            "result_code": "0",
            "error_msg":   "",
            "data":        "",
        })
        return
    }

    for _, attch := range attachs {
        has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
        if err != nil || !has {
            continue
        }

        datasets = append(datasets, CloudBrainDataset{
            strconv.FormatInt(attch.ID, 10),
            attch.Attachment.Name,
            setting.Attachment.Minio.RealPath +
                setting.Attachment.Minio.Bucket + "/" +
                setting.Attachment.Minio.BasePath +
                models.AttachmentRelativePath(attch.UUID) +
                attch.UUID,
            attch.Name,
            attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
        })
    }

    data, err := json.Marshal(datasets)
    if err != nil {
        log.Error("json.Marshal failed: %s", err.Error())
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
        "error_msg":   "",
        "data":        string(data),
    })
}
func checkTypeCloudBrain(typeCloudBrain int) error {
    if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
        log.Error("type error: %d", typeCloudBrain)
        return errors.New("type error")
    }
    return nil
}