You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

dataset.go 15 kB

5 years ago
3 years ago
4 years ago
5 years ago
5 years ago
3 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
3 years ago
3 years ago
5 years ago
4 years ago
4 years ago
3 years ago
4 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
4 years ago
3 years ago
4 years ago
4 years ago
3 years ago
4 years ago
5 years ago
3 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. package models
  2. import (
  3. "errors"
  4. "fmt"
  5. "sort"
  6. "strings"
  7. "code.gitea.io/gitea/modules/log"
  8. "code.gitea.io/gitea/modules/timeutil"
  9. "xorm.io/builder"
  10. )
// Dataset visibility states stored in Dataset.Status.
const (
	DatasetStatusPrivate int32 = iota // visible only to the owner (see GetOwnerDatasetByID)
	DatasetStatusPublic               // visible to everyone
	DatasetStatusDeleted              // soft-deleted; always excluded by SearchDatasetCondition
)
// Dataset is the ORM model of a dataset. A repository holds at most one
// dataset (enforced by CreateDataset); its files are Attachment rows.
type Dataset struct {
	ID            int64  `xorm:"pk autoincr"`
	Title         string `xorm:"INDEX NOT NULL"`
	Status        int32  `xorm:"INDEX"` // DatasetStatusPrivate: 0, DatasetStatusPublic: 1, DatasetStatusDeleted: 2
	Category      string
	Description   string `xorm:"TEXT"`
	DownloadTimes int64 // incremented by IncreaseDownloadCount
	UseCount      int64 `xorm:"DEFAULT 0"` // incremented by IncreaseDatasetUseCount
	NumStars      int   `xorm:"INDEX NOT NULL DEFAULT 0"`
	Recommend     bool  `xorm:"INDEX NOT NULL DEFAULT false"` // toggled by RecommendDataset
	License       string
	Task          string
	ReleaseID     int64              `xorm:"INDEX"`
	UserID        int64              `xorm:"INDEX"` // creator; used for the ownership checks in DeleteDataset / GetOwnerDatasetByID
	RepoID        int64              `xorm:"INDEX"`
	Repo          *Repository        `xorm:"-"` // not persisted; filled by DatasetList.loadAttributes
	CreatedUnix   timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix   timeutil.TimeStamp `xorm:"INDEX updated"`
	User          *User              `xorm:"-"` // not persisted; filled by DatasetList.loadAttributes
	Attachments   []*Attachment      `xorm:"-"` // not persisted; filled by loadAttachmentAttributes / getDatasetAttachments
}
// DatasetWithStar couples a dataset with a star flag — presumably whether the
// viewing user has starred it (set by callers outside this file).
type DatasetWithStar struct {
	Dataset
	IsStaring bool
}
  41. func (d *Dataset) IsPrivate() bool {
  42. switch d.Status {
  43. case DatasetStatusPrivate:
  44. return true
  45. case DatasetStatusPublic:
  46. return false
  47. case DatasetStatusDeleted:
  48. return false
  49. default:
  50. return false
  51. }
  52. }
  53. type DatasetList []*Dataset
  54. func (datasets DatasetList) loadAttributes(e Engine) error {
  55. if len(datasets) == 0 {
  56. return nil
  57. }
  58. set := make(map[int64]struct{})
  59. userIdSet := make(map[int64]struct{})
  60. datasetIDs := make([]int64, len(datasets))
  61. for i := range datasets {
  62. userIdSet[datasets[i].UserID] = struct{}{}
  63. set[datasets[i].RepoID] = struct{}{}
  64. datasetIDs[i] = datasets[i].ID
  65. }
  66. // Load owners.
  67. users := make(map[int64]*User, len(userIdSet))
  68. repos := make(map[int64]*Repository, len(set))
  69. if err := e.
  70. Where("id > 0").
  71. In("id", keysInt64(userIdSet)).
  72. Cols("id", "lower_name", "name", "full_name", "email").
  73. Find(&users); err != nil {
  74. return fmt.Errorf("find users: %v", err)
  75. }
  76. if err := e.
  77. Where("id > 0").
  78. In("id", keysInt64(set)).
  79. Cols("id", "owner_id", "owner_name", "lower_name", "name", "description", "alias", "lower_alias").
  80. Find(&repos); err != nil {
  81. return fmt.Errorf("find repos: %v", err)
  82. }
  83. for i := range datasets {
  84. datasets[i].User = users[datasets[i].UserID]
  85. datasets[i].Repo = repos[datasets[i].RepoID]
  86. }
  87. return nil
  88. }
  89. func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) error {
  90. if len(datasets) == 0 {
  91. return nil
  92. }
  93. datasetIDs := make([]int64, len(datasets))
  94. for i := range datasets {
  95. datasetIDs[i] = datasets[i].ID
  96. }
  97. attachments, err := AttachmentsByDatasetOption(datasetIDs, opts)
  98. if err != nil {
  99. return fmt.Errorf("GetAttachmentsByDatasetIds failed error: %v", err)
  100. }
  101. permissionMap := make(map[int64]*Permission, len(datasets))
  102. for _, attachment := range attachments {
  103. for i := range datasets {
  104. if attachment.DatasetID == datasets[i].ID {
  105. if opts.StarByMe {
  106. var permission *Permission
  107. if permission = permissionMap[datasets[i].ID]; permission == nil {
  108. permissionInstance, err := GetUserRepoPermission(datasets[i].Repo, opts.User)
  109. if err != nil {
  110. return fmt.Errorf("GetPermission failed error: %v", err)
  111. }
  112. permission = &permissionInstance
  113. }
  114. if permission.HasAccess() {
  115. datasets[i].Attachments = append(datasets[i].Attachments, attachment)
  116. } else if !attachment.IsPrivate {
  117. datasets[i].Attachments = append(datasets[i].Attachments, attachment)
  118. }
  119. } else {
  120. datasets[i].Attachments = append(datasets[i].Attachments, attachment)
  121. }
  122. }
  123. }
  124. }
  125. for i := range datasets {
  126. datasets[i].Repo.Owner = nil
  127. }
  128. return nil
  129. }
// SearchDatasetOptions bundles every filter understood by SearchDataset and
// the condition builders below.
type SearchDatasetOptions struct {
	Keyword        string // matched case-insensitively against dataset title and description
	OwnerID        int64
	User           *User // viewing user; used for permission checks when loading attachments
	RepoID         int64 // restrict to a single repository when > 0
	IncludePublic  bool
	RecommendOnly  bool // only datasets flagged as recommended
	Category       string
	Task           string
	License        string
	DatasetIDs     []int64 // currently only takes effect when StarByMe is true
	ListOptions
	SearchOrderBy
	IsOwner              bool
	StarByMe             bool
	CloudBrainType       int //0 cloudbrain 1 modelarts -1 all
	PublicOnly           bool
	JustNeedZipFile      bool // only attachments whose decompress_state > 0
	NeedAttachment       bool // also load each dataset's attachment list
	UploadAttachmentByMe bool // only attachments uploaded by OwnerID
}
  151. func CreateDataset(dataset *Dataset) (err error) {
  152. sess := x.NewSession()
  153. defer sess.Close()
  154. if err := sess.Begin(); err != nil {
  155. return err
  156. }
  157. datasetByRepoId := &Dataset{RepoID: dataset.RepoID}
  158. has, err := sess.Get(datasetByRepoId)
  159. if err != nil {
  160. return err
  161. }
  162. if has {
  163. return fmt.Errorf("The dataset already exists.")
  164. }
  165. if _, err = sess.Insert(dataset); err != nil {
  166. return err
  167. }
  168. return sess.Commit()
  169. }
  170. func RecommendDataset(dataSetId int64, recommend bool) error {
  171. dataset := Dataset{Recommend: recommend}
  172. _, err := x.ID(dataSetId).Cols("recommend").Update(dataset)
  173. return err
  174. }
  175. func SearchDataset(opts *SearchDatasetOptions) (DatasetList, int64, error) {
  176. cond := SearchDatasetCondition(opts)
  177. return SearchDatasetByCondition(opts, cond)
  178. }
// SearchDatasetCondition builds the WHERE condition tree for a dataset
// search. The final query joins dataset, repository and attachment (see
// SearchDatasetByCondition), so conditions reference columns of all three
// tables. Soft-deleted datasets are always excluded.
func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
	var cond = builder.NewCond()
	cond = cond.And(builder.Neq{"dataset.status": DatasetStatusDeleted})
	cond = generateFilterCond(opts, cond)

	if opts.RepoID > 0 {
		cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID})
	}

	if opts.PublicOnly {
		// Strictly public datasets with public attachments.
		cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
		cond = cond.And(builder.Eq{"attachment.is_private": false})
	} else if opts.IncludePublic {
		// Public datasets/attachments, OR-ed (when OwnerID is set) with
		// everything owned by OwnerID regardless of visibility.
		cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
		cond = cond.And(builder.Eq{"attachment.is_private": false})
		if opts.OwnerID > 0 {
			subCon := builder.NewCond()
			subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID})
			// The keyword/category/task/... filters must be repeated inside
			// the OR branch or it would bypass them.
			subCon = generateFilterCond(opts, subCon)
			cond = cond.Or(subCon)
		}
	} else if opts.OwnerID > 0 && !opts.StarByMe && !opts.UploadAttachmentByMe {
		// Owner-scoped search: non-owners only see the owner's public
		// datasets with public attachments.
		cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID})
		if !opts.IsOwner {
			cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
			cond = cond.And(builder.Eq{"attachment.is_private": false})
		}
	}

	if len(opts.DatasetIDs) > 0 {
		if opts.StarByMe {
			cond = cond.And(builder.In("dataset.id", opts.DatasetIDs))
		} else {
			// OR the explicit ID list in, again repeating the shared filters.
			subCon := builder.NewCond()
			subCon = subCon.And(builder.In("dataset.id", opts.DatasetIDs))
			subCon = generateFilterCond(opts, subCon)
			cond = cond.Or(subCon)
		}
	}
	return cond
}
  217. func generateFilterCond(opts *SearchDatasetOptions, cond builder.Cond) builder.Cond {
  218. if len(opts.Keyword) > 0 {
  219. cond = cond.And(builder.Or(builder.Like{"LOWER(dataset.title)", strings.ToLower(opts.Keyword)}, builder.Like{"LOWER(dataset.description)", strings.ToLower(opts.Keyword)}))
  220. }
  221. if len(opts.Category) > 0 {
  222. cond = cond.And(builder.Eq{"dataset.category": opts.Category})
  223. }
  224. if len(opts.Task) > 0 {
  225. cond = cond.And(builder.Eq{"dataset.task": opts.Task})
  226. }
  227. if len(opts.License) > 0 {
  228. cond = cond.And(builder.Eq{"dataset.license": opts.License})
  229. }
  230. if opts.RecommendOnly {
  231. cond = cond.And(builder.Eq{"dataset.recommend": opts.RecommendOnly})
  232. }
  233. if opts.JustNeedZipFile {
  234. cond = cond.And(builder.Gt{"attachment.decompress_state": 0})
  235. }
  236. if opts.CloudBrainType >= 0 {
  237. cond = cond.And(builder.Eq{"attachment.type": opts.CloudBrainType})
  238. }
  239. if opts.UploadAttachmentByMe {
  240. cond = cond.And(builder.Eq{"attachment.uploader_id": opts.OwnerID})
  241. }
  242. return cond
  243. }
  244. func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (DatasetList, int64, error) {
  245. if opts.Page <= 0 {
  246. opts.Page = 1
  247. }
  248. var err error
  249. sess := x.NewSession()
  250. defer sess.Close()
  251. datasets := make(DatasetList, 0, opts.PageSize)
  252. selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars,dataset.recommend,dataset.use_count"
  253. count, err := sess.Distinct("dataset.id").Join("INNER", "repository", "repository.id = dataset.repo_id").
  254. Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
  255. Where(cond).Count(new(Dataset))
  256. if err != nil {
  257. return nil, 0, fmt.Errorf("Count: %v", err)
  258. }
  259. sess.Select(selectColumnsSql).Join("INNER", "repository", "repository.id = dataset.repo_id").
  260. Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
  261. Where(cond).OrderBy(opts.SearchOrderBy.String())
  262. if opts.PageSize > 0 {
  263. sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
  264. }
  265. if err = sess.Find(&datasets); err != nil {
  266. return nil, 0, fmt.Errorf("Dataset: %v", err)
  267. }
  268. if err = datasets.loadAttributes(sess); err != nil {
  269. return nil, 0, fmt.Errorf("LoadAttributes: %v", err)
  270. }
  271. if opts.NeedAttachment {
  272. if err = datasets.loadAttachmentAttributes(opts); err != nil {
  273. return nil, 0, fmt.Errorf("LoadAttributes: %v", err)
  274. }
  275. }
  276. return datasets, count, nil
  277. }
// datasetMetaSearch pairs a slice of dataset IDs with the datasets themselves
// so both can be sorted in lockstep by ID; it implements sort.Interface for
// the merge join in getDatasetAttachments.
type datasetMetaSearch struct {
	ID  []int64
	Rel []*Dataset
}

// Len implements sort.Interface.
func (s datasetMetaSearch) Len() int {
	return len(s.ID)
}

// Swap implements sort.Interface, swapping ID and Rel entries together.
func (s datasetMetaSearch) Swap(i, j int) {
	s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
	s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
}

// Less implements sort.Interface; order is ascending by dataset ID.
func (s datasetMetaSearch) Less(i, j int) bool {
	return s.ID[i] < s.ID[j]
}
// GetDatasetAttachments populates the Attachments field of every given
// dataset using the default engine. typeCloudBrain of -1 selects all
// attachment types; any other value filters on attachment.type.
func GetDatasetAttachments(typeCloudBrain int, isSigned bool, user *User, rels ...*Dataset) (err error) {
	return getDatasetAttachments(x, typeCloudBrain, isSigned, user, rels...)
}
  295. func getDatasetAttachments(e Engine, typeCloudBrain int, isSigned bool, user *User, rels ...*Dataset) (err error) {
  296. if len(rels) == 0 {
  297. return
  298. }
  299. // To keep this efficient as possible sort all datasets by id,
  300. // select attachments by dataset id,
  301. // then merge join them
  302. // Sort
  303. var sortedRels = datasetMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Dataset, len(rels))}
  304. var attachments []*Attachment
  305. for index, element := range rels {
  306. element.Attachments = []*Attachment{}
  307. sortedRels.ID[index] = element.ID
  308. sortedRels.Rel[index] = element
  309. }
  310. sort.Sort(sortedRels)
  311. // Select attachments
  312. if typeCloudBrain == -1 {
  313. err = e.
  314. Asc("dataset_id").
  315. In("dataset_id", sortedRels.ID).
  316. Find(&attachments, Attachment{})
  317. if err != nil {
  318. return err
  319. }
  320. } else {
  321. err = e.
  322. Asc("dataset_id").
  323. In("dataset_id", sortedRels.ID).
  324. And("type = ?", typeCloudBrain).
  325. Find(&attachments, Attachment{})
  326. if err != nil {
  327. return err
  328. }
  329. }
  330. // merge join
  331. var currentIndex = 0
  332. for _, attachment := range attachments {
  333. for sortedRels.ID[currentIndex] < attachment.DatasetID {
  334. currentIndex++
  335. }
  336. fileChunks := make([]*FileChunk, 0, 10)
  337. err = e.
  338. Where("uuid = ?", attachment.UUID).
  339. Find(&fileChunks)
  340. if err != nil {
  341. return err
  342. }
  343. if len(fileChunks) > 0 {
  344. attachment.Md5 = fileChunks[0].Md5
  345. } else {
  346. log.Error("has attachment record, but has no file_chunk record")
  347. attachment.Md5 = "no_record"
  348. }
  349. attachment.CanDel = CanDelAttachment(isSigned, user, attachment)
  350. sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment)
  351. }
  352. return
  353. }
// AddDatasetAttachments binds the attachments identified by attachmentUUIDs
// to the dataset with ID DatasetID by rewriting each attachment's dataset_id.
func AddDatasetAttachments(DatasetID int64, attachmentUUIDs []string) (err error) {
	// Check attachments exist before linking them.
	attachments, err := GetAttachmentsByUUIDs(attachmentUUIDs)
	if err != nil {
		return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %v", attachmentUUIDs, err)
	}
	for i := range attachments {
		attachments[i].DatasetID = DatasetID
		// No assign value could be 0, so ignore AllCols().
		// (xorm's default Update skips zero-valued fields; AllCols would
		// overwrite unset columns with zero values.)
		if _, err = x.ID(attachments[i].ID).Update(attachments[i]); err != nil {
			return fmt.Errorf("update attachment [%d]: %v", attachments[i].ID, err)
		}
	}
	return
}
// UpdateDataset writes all columns of rel (matched by ID) back to the
// database using the engine of the given DB context.
func UpdateDataset(ctx DBContext, rel *Dataset) error {
	_, err := ctx.e.ID(rel.ID).AllCols().Update(rel)
	return err
}
  374. func IncreaseDatasetUseCount(uuid string) {
  375. IncreaseAttachmentUseNumber(uuid)
  376. attachments, _ := GetAttachmentsByUUIDs(strings.Split(uuid, ";"))
  377. countMap := make(map[int64]int)
  378. for _, attachment := range attachments {
  379. value, ok := countMap[attachment.DatasetID]
  380. if ok {
  381. countMap[attachment.DatasetID] = value + 1
  382. } else {
  383. countMap[attachment.DatasetID] = 1
  384. }
  385. }
  386. for key, value := range countMap {
  387. x.Exec("UPDATE `dataset` SET use_count=use_count+? WHERE id=?", value, key)
  388. }
  389. }
  390. // GetDatasetByID returns Dataset with given ID.
  391. func GetDatasetByID(id int64) (*Dataset, error) {
  392. rel := new(Dataset)
  393. has, err := x.
  394. ID(id).
  395. Get(rel)
  396. if err != nil {
  397. return nil, err
  398. } else if !has {
  399. return nil, ErrDatasetNotExist{id}
  400. }
  401. return rel, nil
  402. }
  403. func GetDatasetByRepo(repo *Repository) (*Dataset, error) {
  404. dataset := &Dataset{RepoID: repo.ID}
  405. has, err := x.Get(dataset)
  406. if err != nil {
  407. return nil, err
  408. }
  409. if has {
  410. return dataset, nil
  411. } else {
  412. return nil, ErrNotExist{repo.ID}
  413. }
  414. }
  415. func GetDatasetStarByUser(user *User) ([]*DatasetStar, error) {
  416. datasetStars := make([]*DatasetStar, 0)
  417. err := x.Cols("id", "uid", "dataset_id", "created_unix").Where("uid=?", user.ID).Find(&datasetStars)
  418. return datasetStars, err
  419. }
  420. func DeleteDataset(datasetID int64, uid int64) error {
  421. var err error
  422. sess := x.NewSession()
  423. defer sess.Close()
  424. if err = sess.Begin(); err != nil {
  425. return err
  426. }
  427. dataset := &Dataset{ID: datasetID, UserID: uid}
  428. has, err := sess.Get(dataset)
  429. if err != nil {
  430. return err
  431. } else if !has {
  432. return errors.New("not found")
  433. }
  434. if cnt, err := sess.ID(datasetID).Delete(new(Dataset)); err != nil {
  435. return err
  436. } else if cnt != 1 {
  437. return errors.New("not found")
  438. }
  439. if err = sess.Commit(); err != nil {
  440. sess.Close()
  441. return fmt.Errorf("Commit: %v", err)
  442. }
  443. return nil
  444. }
  445. func GetOwnerDatasetByID(id int64, user *User) (*Dataset, error) {
  446. dataset, err := GetDatasetByID(id)
  447. if err != nil {
  448. return nil, err
  449. }
  450. if !dataset.IsPrivate() {
  451. return dataset, nil
  452. }
  453. if dataset.IsPrivate() && user != nil && user.ID == dataset.UserID {
  454. return dataset, nil
  455. }
  456. return nil, errors.New("dataset not fount")
  457. }
  458. func IncreaseDownloadCount(datasetID int64) error {
  459. // Update download count.
  460. if _, err := x.Exec("UPDATE `dataset` SET download_times=download_times+1 WHERE id=?", datasetID); err != nil {
  461. return fmt.Errorf("increase dataset count: %v", err)
  462. }
  463. return nil
  464. }
  465. func GetCollaboratorDatasetIdsByUserID(userID int64) []int64 {
  466. var datasets []int64
  467. _ = x.Table("dataset").Join("INNER", "collaboration", "dataset.repo_id = collaboration.repo_id and collaboration.mode>0 and collaboration.user_id=?", userID).
  468. Cols("dataset.id").Find(&datasets)
  469. return datasets
  470. }
  471. func GetTeamDatasetIdsByUserID(userID int64) []int64 {
  472. var datasets []int64
  473. _ = x.Table("dataset").Join("INNER", "team_repo", "dataset.repo_id = team_repo.repo_id").
  474. Join("INNER", "team_user", "team_repo.team_id=team_user.team_id and team_user.uid=?", userID).
  475. Cols("dataset.id").Find(&datasets)
  476. return datasets
  477. }