diff --git a/models/attachment.go b/models/attachment.go index a3fc6fa01..7c95a73dd 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -51,6 +51,7 @@ type Attachment struct { FileChunk *FileChunk `xorm:"-"` CanDel bool `xorm:"-"` Uploader *User `xorm:"-"` + Md5 string `xorm:"-"` } type AttachmentUsername struct { diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 725686482..5e4ef3e21 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -389,7 +389,7 @@ type JobResultPayload struct { AppProgress string `json:"appProgress"` AppTrackingURL string `json:"appTrackingUrl"` AppLaunchedTime int64 `json:"appLaunchedTime"` - AppCompletedTime int64 `json:"appCompletedTime"` + AppCompletedTime interface{} `json:"appCompletedTime"` AppExitCode int `json:"appExitCode"` AppExitDiagnostics string `json:"appExitDiagnostics"` AppExitType interface{} `json:"appExitType"` @@ -1371,6 +1371,16 @@ func getRepoCloudBrain(cb *Cloudbrain) (*Cloudbrain, error) { return cb, nil } +func getRepoCloudBrainWithDeleted(cb *Cloudbrain) (*Cloudbrain, error) { + has, err := x.Unscoped().Get(cb) + if err != nil { + return nil, err + } else if !has { + return nil, ErrJobNotExist{} + } + return cb, nil +} + func GetRepoCloudBrainByJobID(repoID int64, jobID string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, RepoID: repoID} return getRepoCloudBrain(cb) @@ -1387,6 +1397,12 @@ func GetCloudbrainByID(id string) (*Cloudbrain, error) { return getRepoCloudBrain(cb) } +func GetCloudbrainByIDWithDeleted(id string) (*Cloudbrain, error) { + idInt64, _ := strconv.ParseInt(id, 10, 64) + cb := &Cloudbrain{ID: idInt64} + return getRepoCloudBrainWithDeleted(cb) +} + func GetCloudbrainByJobIDAndVersionName(jobID string, versionName string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, VersionName: versionName} return getRepoCloudBrain(cb) diff --git a/models/dataset.go b/models/dataset.go index af47c53fe..95800100c 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -1,6 +1,7 @@ package models import ( + "code.gitea.io/gitea/modules/log" "errors" "fmt" "sort" @@ -62,19 +63,20 @@ func (datasets DatasetList) loadAttributes(e Engine) error { } set := make(map[int64]struct{}) + userIdSet := make(map[int64]struct{}) datasetIDs := make([]int64, len(datasets)) for i := range datasets { - set[datasets[i].UserID] = struct{}{} + userIdSet[datasets[i].UserID] = struct{}{} set[datasets[i].RepoID] = struct{}{} datasetIDs[i] = datasets[i].ID } // Load owners. - users := make(map[int64]*User, len(set)) + users := make(map[int64]*User, len(userIdSet)) repos := make(map[int64]*Repository, len(set)) if err := e. Where("id > 0"). - In("id", keysInt64(set)). + In("id", keysInt64(userIdSet)). 
Find(&users); err != nil {
		return fmt.Errorf("find users: %v", err)
	}
@@ -139,20 +141,7 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
 	var cond = builder.NewCond()
 	cond = cond.And(builder.Neq{"dataset.status": DatasetStatusDeleted})
-	if len(opts.Keyword) > 0 {
-		cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword}))
-	}
-
-	if len(opts.Category) > 0 {
-		cond = cond.And(builder.Eq{"dataset.category": opts.Category})
-	}
-
-	if len(opts.Task) > 0 {
-		cond = cond.And(builder.Eq{"dataset.task": opts.Task})
-	}
-	if len(opts.License) > 0 {
-		cond = cond.And(builder.Eq{"dataset.license": opts.License})
-	}
+	cond = generateFilterCond(opts, cond)
 
 	if opts.RepoID > 0 {
 		cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID})
@@ -162,14 +151,12 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
 		cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
 		cond = cond.And(builder.Eq{"attachment.is_private": false})
 		if opts.OwnerID > 0 {
-			if len(opts.Keyword) == 0 {
-				cond = cond.Or(builder.Eq{"repository.owner_id": opts.OwnerID})
-			} else {
-				subCon := builder.NewCond()
-				subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID}, builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword}))
-				cond = cond.Or(subCon)
-
-			}
+
+			subCon := builder.NewCond()
+			subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID})
+			subCon = generateFilterCond(opts, subCon)
+			cond = cond.Or(subCon)
+
 		}
 	} else if opts.OwnerID > 0 {
 		cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID})
@@ -182,6 +169,25 @@
 	return cond
 }
 
+func generateFilterCond(opts *SearchDatasetOptions, cond builder.Cond) builder.Cond {
+	if len(opts.Keyword) > 0 {
+		cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword}))
+	}
+
+	if len(opts.Category) > 0 {
+		cond = cond.And(builder.Eq{"dataset.category": opts.Category})
+	}
+
+	if len(opts.Task) > 0 {
+		cond = cond.And(builder.Eq{"dataset.task": opts.Task})
+	}
+	if len(opts.License) > 0 {
+		cond = cond.And(builder.Eq{"dataset.license": opts.License})
+	}
+
+	return cond
+}
+
 func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (DatasetList, int64, error) {
 	if opts.Page <= 0 {
 		opts.Page = 1
@@ -292,7 +298,13 @@ func getDatasetAttachments(e Engine, typeCloudBrain int, isSigned bool, user *Us
 		if err != nil {
 			return err
 		}
-		attachment.FileChunk = fileChunks[0]
+		if len(fileChunks) > 0 {
+			attachment.Md5 = fileChunks[0].Md5
+		} else {
+			log.Error("attachment record exists, but no matching file_chunk record was found")
+			attachment.Md5 = "no_record"
+		}
+
 		attachment.CanDel = CanDelAttachment(isSigned, user, attachment)
 		sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment)
 	}
@@ -348,7 +360,7 @@ func GetDatasetByRepo(repo *Repository) (*Dataset, error) {
 	if has {
 		return dataset, nil
 	} else {
-		return nil, errors.New("Not Found")
+		return nil, ErrNotExist{repo.ID}
 	}
 }
 
diff --git a/models/dbsql/dataset_foreigntable_for_es.sql b/models/dbsql/dataset_foreigntable_for_es.sql
index 815b89d02..02e5f0ddf 100644
--- a/models/dbsql/dataset_foreigntable_for_es.sql
+++ b/models/dbsql/dataset_foreigntable_for_es.sql
@@ -1,4 +1,17 @@
+DELETE FROM public.dataset_es;
 DROP FOREIGN TABLE public.dataset_es;
+DROP TRIGGER IF EXISTS
es_insert_dataset on public.dataset; +DROP FUNCTION public.insert_dataset_data(); +DROP TRIGGER IF EXISTS es_udpate_dataset_file_name on public.attachment; +DROP FUNCTION public.udpate_dataset_file_name; + +DROP TRIGGER IF EXISTS es_update_dataset on public.dataset; +DROP FUNCTION public.update_dataset; + +DROP TRIGGER IF EXISTS es_delete_dataset on public.dataset; +DROP FUNCTION public.delete_dataset; + + CREATE FOREIGN TABLE public.dataset_es ( id bigint NOT NULL, diff --git a/models/dbsql/issue_foreigntable_for_es.sql b/models/dbsql/issue_foreigntable_for_es.sql index bb5c1634e..d6a16cd27 100644 --- a/models/dbsql/issue_foreigntable_for_es.sql +++ b/models/dbsql/issue_foreigntable_for_es.sql @@ -1,4 +1,15 @@ +delete from public.issue_es; DROP FOREIGN TABLE public.issue_es; +DROP TRIGGER IF EXISTS es_insert_issue on public.issue; +DROP FUNCTION public.insert_issue_data; +DROP TRIGGER IF EXISTS es_udpate_issue_comment on public.comment; +DROP FUNCTION udpate_issue_comment; +DROP TRIGGER IF EXISTS es_update_issue on public.issue; +DROP FUNCTION public.update_issue; +DROP TRIGGER IF EXISTS es_delete_issue on public.issue; +DROP FUNCTION public.delete_issue; + + CREATE FOREIGN TABLE public.issue_es ( id bigint NOT NULL, @@ -182,6 +193,7 @@ $def$ name=NEW.name, is_closed=NEW.is_closed, num_comments=NEW.num_comments, + updated_unix=NEW.updated_unix, comment=(select array_to_string(array_agg(content order by created_unix desc),',') from public.comment where issue_id=NEW.id) where id=NEW.id; return new; diff --git a/models/dbsql/repo_foreigntable_for_es.sql b/models/dbsql/repo_foreigntable_for_es.sql index f51155ccf..7e06fd99e 100644 --- a/models/dbsql/repo_foreigntable_for_es.sql +++ b/models/dbsql/repo_foreigntable_for_es.sql @@ -1,5 +1,18 @@ -- 要处理项目从私有变为公有,并且从公有变成私有的情况 +DELETE FROM public.repository_es; DROP FOREIGN table if exists public.repository_es; +DROP TRIGGER IF EXISTS es_insert_repository on public.repository; +DROP FUNCTION public.insert_repository_data; +DROP TRIGGER IF EXISTS es_update_repository on public.repository; +DROP FUNCTION public.update_repository; + +DROP TRIGGER IF EXISTS es_delete_repository on public.repository; +DROP FUNCTION public.delete_repository; + +DROP TRIGGER IF EXISTS es_udpate_repository_lang on public.language_stat; +DROP FUNCTION public.udpate_repository_lang; + + CREATE FOREIGN TABLE public.repository_es ( id bigint NOT NULL, owner_id bigint, diff --git a/models/dbsql/user_foreigntable_for_es.sql b/models/dbsql/user_foreigntable_for_es.sql index c3d21b92a..5d77757f0 100644 --- a/models/dbsql/user_foreigntable_for_es.sql +++ b/models/dbsql/user_foreigntable_for_es.sql @@ -1,4 +1,13 @@ +DELETE FROM public.user_es; DROP FOREIGN table if exists public.user_es; +DROP TRIGGER IF EXISTS es_insert_user on public.user; +DROP FUNCTION public.insert_user_data; +DROP TRIGGER IF EXISTS es_update_user on public.user; +DROP FUNCTION public.update_user; + +DROP TRIGGER IF EXISTS es_delete_user on public.user; +DROP FUNCTION public.delete_user; + CREATE FOREIGN TABLE public.user_es ( id bigint NOT NULL , diff --git a/models/models.go b/models/models.go index 362d46618..2ec61941d 100755 --- a/models/models.go +++ b/models/models.go @@ -138,6 +138,7 @@ func init() { new(OfficialTag), new(OfficialTagRepos), new(WechatBindLog), + new(OrgStatistic), new(SearchRecord), ) @@ -153,6 +154,8 @@ func init() { new(UserBusinessAnalysisCurrentWeek), new(UserBusinessAnalysisYesterday), new(UserLoginLog), + new(UserMetrics), + new(UserAnalysisPara), ) gonicNames := []string{"SSL", 
"UID"} diff --git a/models/org.go b/models/org.go index 85fb157ae..8b3e60ef8 100755 --- a/models/org.go +++ b/models/org.go @@ -8,6 +8,7 @@ package models import ( "fmt" "os" + "strconv" "strings" "code.gitea.io/gitea/modules/log" @@ -19,6 +20,17 @@ import ( "xorm.io/xorm" ) +type OrgStatistic struct { + ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"UNIQUE"` + NumScore int `xorm:"INDEX NOT NULL DEFAULT 0"` +} + +type OrgScore struct { + *User + Score string +} + // IsOwnedBy returns true if given user is in the owner team. func (org *User) IsOwnedBy(uid int64) (bool, error) { return IsOrganizationOwner(org.ID, uid) @@ -135,6 +147,93 @@ func (org *User) RemoveOrgRepo(repoID int64) error { return org.removeOrgRepo(x, repoID) } +func UpdateOrgStatistics() { + ids, err := GetOrganizationsId() + if err != nil { + return + } + for _, id := range ids { + org := User{ID: id} + orgStat := &OrgStatistic{OrgID: id} + numScore, err := org.getOrgStatistics() + if err == nil { + has, _ := x.Get(orgStat) + + orgStat.NumScore = numScore + if has { + x.ID(orgStat.ID).Cols("num_score").Update(&orgStat) + } else { + x.Insert(orgStat) + } + + } + } + +} + +func (org *User) getOrgStatistics() (int, error) { + count, err := getRepositoryCount(x, org) + if err != nil { + return 0, err + } + + err = org.GetRepositories(ListOptions{int(count), 1}) + + if err != nil { + return 0, err + } + var numScore = 0 + for _, repo := range org.Repos { + + numScore += int(getOpenIByRepoId(repo.ID)) + } + + return numScore, nil + +} + +func FindTopNStarsOrgs(n int) ([]*OrgScore, error) { + sql := "select a.id,sum(b.num_stars) score from \"user\" a ,repository b where a.id=b.owner_id and a.type=1 group by a.id order by score desc limit " + strconv.Itoa(n) + + return findTopNOrgs(sql) +} +func FindTopNMembersOrgs(n int) ([]*OrgScore, error) { + sql := "select id, count(user_id) score from" + + " (select org_id as id, uid as user_id from org_user " + + "union select a.id,b.user_id from \"user\" a,collaboration b,repository c " + + "where a.type=1 and a.id=c.owner_id and b.repo_id=c.id) d " + + "group by id order by score desc limit " + strconv.Itoa(n) + + return findTopNOrgs(sql) +} + +func FindTopNOpenIOrgs(n int) ([]*OrgScore, error) { + sql := "select org_id id,num_score score from org_statistic order by num_score desc limit 10" + strconv.Itoa(n) + + return findTopNOrgs(sql) +} + +func findTopNOrgs(sql string) ([]*OrgScore, error) { + resutls, err := x.QueryString(sql) + + if err != nil { + return nil, err + } + var orgScore []*OrgScore + for _, record := range resutls { + id, _ := strconv.ParseInt(record["id"], 10, 64) + user, err := getUserByID(x, id) + if err != nil { + continue + } + orgScore = append(orgScore, &OrgScore{user, record["score"]}) + + } + + return orgScore, nil + +} + // CreateOrganization creates record of a new organization. 
func CreateOrganization(org, owner *User) (err error) { if !owner.CanCreateOrganization() { diff --git a/models/repo.go b/models/repo.go index b5d4921e4..25bfb4a74 100755 --- a/models/repo.go +++ b/models/repo.go @@ -1607,14 +1607,16 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e } dataset, err := GetDatasetByRepo(repo) - if err != nil { + if err != nil && !IsErrNotExist(err) { return err } - _, err = e.Where("dataset_id = ?", dataset.ID).Cols("is_private").Update(&Attachment{ - IsPrivate: true, - }) - if err != nil { - return err + if dataset != nil { + _, err = e.Where("dataset_id = ?", dataset.ID).Cols("is_private").Update(&Attachment{ + IsPrivate: true, + }) + if err != nil { + return err + } } } else { diff --git a/models/repo_statistic.go b/models/repo_statistic.go index a9e9593af..4f8f13ed7 100755 --- a/models/repo_statistic.go +++ b/models/repo_statistic.go @@ -73,6 +73,16 @@ func (repo *RepoStatistic) DisplayName() string { return repo.Alias } +func getOpenIByRepoId(repoId int64) float64 { + repoStatistic := new(RepoStatistic) + has, err := xStatistic.Cols("radar_total").Where("repo_id=?", repoId).Desc("id").Limit(1).Get(repoStatistic) + if !has || err != nil { + return 0 + } + return repoStatistic.RadarTotal + +} + func DeleteRepoStatDaily(date string) error { sess := xStatistic.NewSession() defer sess.Close() diff --git a/models/user.go b/models/user.go index f72462051..71885aeb1 100755 --- a/models/user.go +++ b/models/user.go @@ -2104,6 +2104,12 @@ func GetOrganizationsCount() (int64, error) { } +func GetOrganizationsId() ([]int64, error) { + var ids []int64 + err := x.Table("user").Where("type=1").Cols("id").Find(&ids) + return ids, err +} + func GetBlockChainUnSuccessUsers() ([]*User, error) { users := make([]*User, 0, 10) err := x.Where("public_key = ''"). 
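A note on the updateRepository change above before the next file: GetDatasetByRepo now returns the typed ErrNotExist instead of a bare errors.New("Not Found"), so callers can tell "repository has no dataset" apart from a real lookup failure. A minimal sketch of the resulting calling pattern; the helper name hasDataset is hypothetical and only illustrates the contract:

// hasDataset reports whether the repository has a dataset, treating the
// typed ErrNotExist as "no dataset" rather than as a failure.
// Illustrative helper in package models; not part of this patch.
func hasDataset(repo *Repository) (bool, error) {
	dataset, err := GetDatasetByRepo(repo)
	if err != nil {
		if IsErrNotExist(err) {
			return false, nil // no dataset attached; not an error for callers
		}
		return false, err // genuine lookup failure: propagate
	}
	return dataset != nil, nil
}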
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go index 288762161..65ce642d5 100644 --- a/models/user_business_analysis.go +++ b/models/user_business_analysis.go @@ -6,7 +6,6 @@ import ( "strconv" "time" - "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" @@ -81,6 +80,19 @@ type UserBusinessAnalysisAll struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + //cloudbraintask + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysis struct { @@ -146,6 +158,18 @@ type UserBusinessAnalysis struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisQueryOptions struct { @@ -183,6 +207,29 @@ func getLastCountDate() int64 { return pageStartTime.Unix() } +func QueryMetrics(start int64, end int64) ([]*UserMetrics, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + userMetricsList := make([]*UserMetrics, 0) + if err := statictisSess.Table(new(UserMetrics)).Where("count_date >" + fmt.Sprint(start) + " and count_date<" + fmt.Sprint(end)).OrderBy("count_date desc"). + Find(&userMetricsList); err != nil { + return nil, 0 + } + return userMetricsList, int64(len(userMetricsList)) +} + +func QueryRankList(key string, tableName string, limit int) ([]*UserBusinessAnalysisAll, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) + if err := statictisSess.Table(tableName).OrderBy(key+" desc,id desc").Limit(limit, 0). 
+ Find(&userBusinessAnalysisAllList); err != nil { + return nil, 0 + } + return userBusinessAnalysisAllList, int64(len(userBusinessAnalysisAllList)) +} + func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string) ([]*UserBusinessAnalysisAll, int64) { statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -334,6 +381,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus resultMap[userRecord.ID].WatchedCount += userRecord.WatchedCount resultMap[userRecord.ID].CommitCodeSize += userRecord.CommitCodeSize resultMap[userRecord.ID].CommitDatasetSize += userRecord.CommitDatasetSize + resultMap[userRecord.ID].CommitDatasetNum += userRecord.CommitDatasetNum resultMap[userRecord.ID].CommitModelCount += userRecord.CommitModelCount resultMap[userRecord.ID].SolveIssueCount += userRecord.SolveIssueCount resultMap[userRecord.ID].EncyclopediasCount += userRecord.EncyclopediasCount @@ -353,7 +401,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus return userBusinessAnalysisReturnList, count } -func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats, tableName string, pageStartTime time.Time, pageEndTime time.Time) { +func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageStartTime time.Time, pageEndTime time.Time, userMetrics map[string]int) { sess := x.NewSession() defer sess.Close() @@ -379,14 +427,15 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s FocusRepoCountMap := queryWatch(start_unix, end_unix) StarRepoCountMap := queryStar(start_unix, end_unix) WatchedCountMap := queryFollow(start_unix, end_unix) - - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitCodeSizeMap := queryCommitCodeSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) DataDate := currentTimeNow.Format("2006-01-02") + " 00:01" cond := "type != 1 and is_active=true" @@ -395,6 +444,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s log.Info("query user error. 
return.") return } + ParaWeight := getParaWeight() var indexTotal int64 indexTotal = 0 insertCount := 0 @@ -412,84 +462,22 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.Name = userRecord.Name dateRecordAll.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecordAll.DataDate = DataDate - - if _, ok := CodeMergeCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CodeMergeCount = 0 - } else { - dateRecordAll.CodeMergeCount = CodeMergeCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitCount = 0 - } else { - dateRecordAll.CommitCount = CommitCountMap[dateRecordAll.ID] - } - - if _, ok := IssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.IssueCount = 0 - } else { - dateRecordAll.IssueCount = IssueCountMap[dateRecordAll.ID] - } - - if _, ok := CommentCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommentCount = 0 - } else { - dateRecordAll.CommentCount = CommentCountMap[dateRecordAll.ID] - } - - if _, ok := FocusRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.FocusRepoCount = 0 - } else { - dateRecordAll.FocusRepoCount = FocusRepoCountMap[dateRecordAll.ID] - } - - if _, ok := StarRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.StarRepoCount = 0 - } else { - dateRecordAll.StarRepoCount = StarRepoCountMap[dateRecordAll.ID] - } - - if _, ok := WatchedCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.WatchedCount = 0 - } else { - dateRecordAll.WatchedCount = WatchedCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCodeSizeMap[dateRecordAll.Email]; !ok { - dateRecordAll.CommitCodeSize = 0 - } else { - dateRecordAll.CommitCodeSize = int(CommitCodeSizeMap[dateRecordAll.Email].CommitLines) - } - - if _, ok := CommitDatasetSizeMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitDatasetSize = 0 - } else { - dateRecordAll.CommitDatasetSize = CommitDatasetSizeMap[dateRecordAll.ID] - } - - if _, ok := SolveIssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.SolveIssueCount = 0 - } else { - dateRecordAll.SolveIssueCount = SolveIssueCountMap[dateRecordAll.ID] - } - - if _, ok := wikiCountMap[dateRecordAll.Name]; !ok { - dateRecordAll.EncyclopediasCount = 0 - } else { - dateRecordAll.EncyclopediasCount = wikiCountMap[dateRecordAll.Name] - } - - if _, ok := CreateRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CreateRepoCount = 0 - } else { - dateRecordAll.CreateRepoCount = CreateRepoCountMap[dateRecordAll.ID] - } - - if _, ok := LoginCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.LoginCount = 0 - } else { - dateRecordAll.LoginCount = LoginCountMap[dateRecordAll.ID] - } + dateRecordAll.UserLocation = userRecord.Location + + dateRecordAll.CodeMergeCount = getMapValue(dateRecordAll.ID, CodeMergeCountMap) + dateRecordAll.CommitCount = getMapValue(dateRecordAll.ID, CommitCountMap) + dateRecordAll.IssueCount = getMapValue(dateRecordAll.ID, IssueCountMap) + dateRecordAll.CommentCount = getMapValue(dateRecordAll.ID, CommentCountMap) + dateRecordAll.FocusRepoCount = getMapValue(dateRecordAll.ID, FocusRepoCountMap) + dateRecordAll.StarRepoCount = getMapValue(dateRecordAll.ID, StarRepoCountMap) + dateRecordAll.WatchedCount = getMapValue(dateRecordAll.ID, WatchedCountMap) + dateRecordAll.CommitCodeSize = getMapValue(dateRecordAll.ID, CommitCodeSizeMap) + dateRecordAll.CommitDatasetSize = getMapValue(dateRecordAll.ID, CommitDatasetSizeMap) + dateRecordAll.CommitDatasetNum = getMapValue(dateRecordAll.ID, CommitDatasetNumMap) + dateRecordAll.SolveIssueCount = 
getMapValue(dateRecordAll.ID, SolveIssueCountMap) + dateRecordAll.EncyclopediasCount = getMapKeyStringValue(dateRecordAll.Name, wikiCountMap) + dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap) + dateRecordAll.LoginCount = getMapValue(dateRecordAll.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecordAll.ID]; !ok { dateRecordAll.OpenIIndex = 0 @@ -497,7 +485,15 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.OpenIIndex = OpenIIndexMap[dateRecordAll.ID] } - dateRecordAll.CommitModelCount = 0 + dateRecordAll.CloudBrainTaskNum = getMapValue(dateRecordAll.ID, CloudBrainTaskMap) + dateRecordAll.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap) + dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight) dateRecordBatch = append(dateRecordBatch, dateRecordAll) if len(dateRecordBatch) >= BATCH_INSERT_SIZE { @@ -508,6 +504,11 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s } dateRecordBatch = make([]UserBusinessAnalysisAll, 0) } + if tableName == "user_business_analysis_all" { + if dateRecordAll.UserIndex > 0 || dateRecordAll.LoginCount > 0 { + userMetrics["TotalHasActivityUser"] = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + 1 + } + } } indexTotal += PAGE_SIZE if indexTotal >= count { @@ -529,7 +530,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static insertBatchSql := "INSERT INTO public." 
+ tableName + "(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " + - "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date) " + + "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location) " + "VALUES" for i, record := range dateRecords { @@ -537,7 +538,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static ", " + fmt.Sprint(record.IssueCount) + ", " + fmt.Sprint(record.CommentCount) + ", " + fmt.Sprint(record.FocusRepoCount) + ", " + fmt.Sprint(record.StarRepoCount) + ", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) + ", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) + - ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "')" + ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "')" if i < (len(dateRecords) - 1) { insertBatchSql += "," } @@ -546,36 +547,36 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static statictisSess.Exec(insertBatchSql) } -func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats) { +func RefreshUserStaticAllTabel(wikiCountMap map[string]int, userMetrics map[string]int) { currentTimeNow := time.Now() pageStartTime := time.Date(2021, 11, 5, 0, 0, 0, 0, currentTimeNow.Location()) pageEndTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_all", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_all", pageStartTime, pageEndTime, userMetrics) log.Info("refresh all data finished.") pageStartTime = time.Date(currentTimeNow.Year(), 1, 1, 0, 0, 0, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_year", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_year", pageStartTime, pageEndTime, userMetrics) thisMonth := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 0, 0, 0, 0, currentTimeNow.Location()) - 
refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_month", thisMonth, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_month", thisMonth, pageEndTime, userMetrics) offset := int(time.Monday - currentTimeNow.Weekday()) if offset > 0 { offset = -6 } pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, offset) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_week", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_week", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -30) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -1) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime, userMetrics) pageStartTime = thisMonth.AddDate(0, -1, 0) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last_month", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last_month", pageStartTime, pageEndTime, userMetrics) } @@ -613,12 +614,13 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } else { log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) } - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -628,6 +630,9 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, log.Info("query user error. 
return.") return err } + + ParaWeight := getParaWeight() + userMetrics := make(map[string]int) var indexTotal int64 indexTotal = 0 for { @@ -648,47 +653,14 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.Name = userRecord.Name dateRecord.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecord.DataDate = DataDate - if _, ok := CodeMergeCountMap[dateRecord.ID]; !ok { - dateRecord.CodeMergeCount = 0 - } else { - dateRecord.CodeMergeCount = CodeMergeCountMap[dateRecord.ID] - } - - if _, ok := CommitCountMap[dateRecord.ID]; !ok { - dateRecord.CommitCount = 0 - } else { - dateRecord.CommitCount = CommitCountMap[dateRecord.ID] - } - - if _, ok := IssueCountMap[dateRecord.ID]; !ok { - dateRecord.IssueCount = 0 - } else { - dateRecord.IssueCount = IssueCountMap[dateRecord.ID] - } - - if _, ok := CommentCountMap[dateRecord.ID]; !ok { - dateRecord.CommentCount = 0 - } else { - dateRecord.CommentCount = CommentCountMap[dateRecord.ID] - } - - if _, ok := FocusRepoCountMap[dateRecord.ID]; !ok { - dateRecord.FocusRepoCount = 0 - } else { - dateRecord.FocusRepoCount = FocusRepoCountMap[dateRecord.ID] - } - if _, ok := StarRepoCountMap[dateRecord.ID]; !ok { - dateRecord.StarRepoCount = 0 - } else { - dateRecord.StarRepoCount = StarRepoCountMap[dateRecord.ID] - } - - if _, ok := WatchedCountMap[dateRecord.ID]; !ok { - dateRecord.WatchedCount = 0 - } else { - dateRecord.WatchedCount = WatchedCountMap[dateRecord.ID] - } + dateRecord.CodeMergeCount = getMapValue(dateRecord.ID, CodeMergeCountMap) + dateRecord.CommitCount = getMapValue(dateRecord.ID, CommitCountMap) + dateRecord.IssueCount = getMapValue(dateRecord.ID, IssueCountMap) + dateRecord.CommentCount = getMapValue(dateRecord.ID, CommentCountMap) + dateRecord.FocusRepoCount = getMapValue(dateRecord.ID, FocusRepoCountMap) + dateRecord.StarRepoCount = getMapValue(dateRecord.ID, StarRepoCountMap) + dateRecord.WatchedCount = getMapValue(dateRecord.ID, WatchedCountMap) if _, ok := CommitCodeSizeMap[dateRecord.Email]; !ok { dateRecord.CommitCodeSize = 0 @@ -696,35 +668,15 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.CommitCodeSize = int(CommitCodeSizeMap[dateRecord.Email].CommitLines) } - if _, ok := CommitDatasetSizeMap[dateRecord.ID]; !ok { - dateRecord.CommitDatasetSize = 0 - } else { - dateRecord.CommitDatasetSize = CommitDatasetSizeMap[dateRecord.ID] - } + dateRecord.CommitDatasetSize = getMapValue(dateRecord.ID, CommitDatasetSizeMap) + dateRecord.CommitDatasetNum = getMapValue(dateRecord.ID, CommitDatasetNumMap) + dateRecord.SolveIssueCount = getMapValue(dateRecord.ID, SolveIssueCountMap) - if _, ok := SolveIssueCountMap[dateRecord.ID]; !ok { - dateRecord.SolveIssueCount = 0 - } else { - dateRecord.SolveIssueCount = SolveIssueCountMap[dateRecord.ID] - } + dateRecord.EncyclopediasCount = getMapKeyStringValue(dateRecord.Name, wikiCountMap) - if _, ok := wikiCountMap[dateRecord.Name]; !ok { - dateRecord.EncyclopediasCount = 0 - } else { - dateRecord.EncyclopediasCount = wikiCountMap[dateRecord.Name] - } + dateRecord.CreateRepoCount = getMapValue(dateRecord.ID, CreateRepoCountMap) - if _, ok := CreateRepoCountMap[dateRecord.ID]; !ok { - dateRecord.CreateRepoCount = 0 - } else { - dateRecord.CreateRepoCount = CreateRepoCountMap[dateRecord.ID] - } - - if _, ok := LoginCountMap[dateRecord.ID]; !ok { - dateRecord.LoginCount = 0 - } else { - dateRecord.LoginCount = LoginCountMap[dateRecord.ID] - } + dateRecord.LoginCount = 
getMapValue(dateRecord.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecord.ID]; !ok { dateRecord.OpenIIndex = 0 @@ -732,8 +684,17 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.OpenIIndex = OpenIIndexMap[dateRecord.ID] } - dateRecord.CommitModelCount = 0 - + dateRecord.CloudBrainTaskNum = getMapValue(dateRecord.ID, CloudBrainTaskMap) + dateRecord.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecord.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecord.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecord.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecord.CloudBrainRunTime = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_CloudBrainRunTime", CloudBrainTaskItemMap) + dateRecord.CommitModelCount = getMapValue(dateRecord.ID, AiModelManageMap) + dateRecord.UserIndex = getUserIndex(dateRecord, ParaWeight) + setUserMetrics(userMetrics, userRecord, start_unix, end_unix, dateRecord) _, err = statictisSess.Insert(&dateRecord) if err != nil { log.Info("insert daterecord failed." + err.Error()) @@ -747,11 +708,142 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } } - RefreshUserStaticAllTabel(wikiCountMap, CommitCodeSizeMap) + RefreshUserStaticAllTabel(wikiCountMap, userMetrics) + + //insert userMetrics table + var useMetrics UserMetrics + useMetrics.CountDate = CountDate.Unix() + statictisSess.Delete(&useMetrics) + + useMetrics.ActivateRegistUser = getMapKeyStringValue("ActivateRegistUser", userMetrics) + useMetrics.HasActivityUser = getMapKeyStringValue("HasActivityUser", userMetrics) + useMetrics.NotActivateRegistUser = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + useMetrics.TotalActivateRegistUser = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + useMetrics.TotalHasActivityUser = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + statictisSess.Insert(&useMetrics) return nil } +func setUserMetrics(userMetrics map[string]int, user *User, start_time int64, end_time int64, dateRecord UserBusinessAnalysis) { + //ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalHasActivityUser + regist_time := user.CreatedUnix.AsTime().Unix() + if regist_time >= start_time && regist_time <= end_time { + if user.IsActive { + userMetrics["ActivateRegistUser"] = getMapKeyStringValue("ActivateRegistUser", userMetrics) + 1 + } else { + userMetrics["NotActivateRegistUser"] = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + 1 + } + } + if user.IsActive { + userMetrics["TotalActivateRegistUser"] = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + 1 + } + + if dateRecord.UserIndex > 0 || dateRecord.LoginCount > 0 { + userMetrics["HasActivityUser"] = getMapKeyStringValue("HasActivityUser", userMetrics) + 1 + } + +} + +func getParaWeight() map[string]float64 { + result := make(map[string]float64) + statictisSess := 
xStatistic.NewSession()
+	defer statictisSess.Close()
+	statictisSess.Select("*").Table(new(UserAnalysisPara))
+	paraList := make([]*UserAnalysisPara, 0)
+	statictisSess.Find(&paraList)
+	for _, paraRecord := range paraList {
+		result[paraRecord.Key] = paraRecord.Value
+	}
+	return result
+}
+
+// getUserIndexFromAnalysisAll computes the weighted activity index for one user record.
+func getUserIndexFromAnalysisAll(dateRecord UserBusinessAnalysisAll, ParaWeight map[string]float64) float64 {
+	var result float64
+	// PR count 0.20
+	// commit count 0.20
+	// issues created 0.20
+	// comment count 0.20
+	// watched repo count 0.10
+	// starred repo count 0.10
+	// login count 0.10
+	result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2)
+	result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2)
+	result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2)
+	result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2)
+	result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1)
+	result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1)
+	result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1)
+	result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3)
+	result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
+	result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2)
+	result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1)
+	result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05)
+	result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3)
+	result += float64(dateRecord.CommitModelCount) * getParaWeightValue("CommitModelCount", ParaWeight, 0.2)
+	result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1)
+
+	return result
+}
+
+// getUserIndex is the UserBusinessAnalysis counterpart of getUserIndexFromAnalysisAll.
+func getUserIndex(dateRecord UserBusinessAnalysis, ParaWeight map[string]float64) float64 {
+	var result float64
+	// PR count 0.20
+	// commit count 0.20
+	// issues created 0.20
+	// comment count 0.20
+	// watched repo count 0.10
+	// starred repo count 0.10
+	// login count 0.10
+	result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2)
+	result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2)
+	result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2)
+	result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2)
+	result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1)
+	result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1)
+	result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1)
+	result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3)
+	result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
+	result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2)
+	result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1)
+	result += float64(dateRecord.CreateRepoCount) * 
getParaWeightValue("CreateRepoCount", ParaWeight, 0.05) + result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3) + result += float64(dateRecord.CommitModelCount) * getParaWeightValue("CommitModelCount", ParaWeight, 0.2) + result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1) + + return result +} + +func getParaWeightValue(key string, valueMap map[string]float64, defaultValue float64) float64 { + if _, ok := valueMap[key]; !ok { + return defaultValue + } else { + return valueMap[key] + } +} + +func getMapKeyStringValue(key string, valueMap map[string]int) int { + if _, ok := valueMap[key]; !ok { + return 0 + } else { + return valueMap[key] + } +} + +func getMapValue(userId int64, valueMap map[int64]int) int { + if _, ok := valueMap[userId]; !ok { + return 0 + } else { + return valueMap[userId] + } +} + func getInt(str string) int { re, err := strconv.ParseInt(str, 10, 32) if err != nil { @@ -1052,16 +1144,17 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { return resultMap } -func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { +func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() - resultMap := make(map[int64]int) + resultSizeMap := make(map[int64]int) + resultNumMap := make(map[int64]int) cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Attachment)) if err != nil { log.Info("query attachment error. return.") - return resultMap + return resultSizeMap, resultNumMap } var indexTotal int64 indexTotal = 0 @@ -1072,10 +1165,12 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { log.Info("query Attachment size=" + fmt.Sprint(len(attachmentList))) for _, attachRecord := range attachmentList { - if _, ok := resultMap[attachRecord.UploaderID]; !ok { - resultMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + if _, ok := resultSizeMap[attachRecord.UploaderID]; !ok { + resultSizeMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] = 1 } else { - resultMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultSizeMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] += 1 } } @@ -1085,7 +1180,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { } } - return resultMap + return resultSizeMap, resultNumMap } func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { @@ -1212,6 +1307,133 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { return resultMap } +func queryCommitCodeSize(start_unix int64, end_unix int64) map[int64]int { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + resultMap := make(map[int64]int) + cond := "count_date>=" + fmt.Sprint(start_unix) + " and count_date<=" + fmt.Sprint(end_unix) + count, err := statictisSess.Where(cond).Count(new(UserBusinessAnalysis)) + if err != nil { + log.Info("query commit code size error. 
return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + statictisSess.Select("id,commit_code_size").Table("user_business_analysis").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + userBusinessAnalysisList := make([]*UserBusinessAnalysis, 0) + statictisSess.Find(&userBusinessAnalysisList) + log.Info("query user login size=" + fmt.Sprint(len(userBusinessAnalysisList))) + for _, analysisRecord := range userBusinessAnalysisList { + if _, ok := resultMap[analysisRecord.ID]; !ok { + resultMap[analysisRecord.ID] = analysisRecord.CommitCodeSize + } else { + resultMap[analysisRecord.ID] += analysisRecord.CommitCodeSize + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + log.Info("user commit code size=" + fmt.Sprint(len(resultMap))) + return resultMap +} + +func queryUserModel(start_unix int64, end_unix int64) map[int64]int { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(AiModelManage)) + if err != nil { + log.Info("query AiModelManage error. return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,user_id").Table("ai_model_manage").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + aiModelList := make([]*AiModelManage, 0) + sess.Find(&aiModelList) + log.Info("query AiModelManage size=" + fmt.Sprint(len(aiModelList))) + for _, aiModelRecord := range aiModelList { + if _, ok := resultMap[aiModelRecord.UserId]; !ok { + resultMap[aiModelRecord.UserId] = 1 + } else { + resultMap[aiModelRecord.UserId] += 1 + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + return resultMap +} + +func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[string]int) { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + resultItemMap := make(map[string]int) + + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(Cloudbrain)) + if err != nil { + log.Info("query cloudbrain error. 
return.") + return resultMap, resultItemMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,job_type,user_id,duration,train_job_duration,type").Table("cloudbrain").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + cloudTaskList := make([]*Cloudbrain, 0) + sess.Find(&cloudTaskList) + log.Info("query cloudbrain size=" + fmt.Sprint(len(cloudTaskList))) + for _, cloudTaskRecord := range cloudTaskList { + if _, ok := resultMap[cloudTaskRecord.UserID]; !ok { + resultMap[cloudTaskRecord.UserID] = 1 + } else { + resultMap[cloudTaskRecord.UserID] += 1 + } + setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap) + if cloudTaskRecord.Type == 1 { //npu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "INFERENCE" { + setMapKey("NpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } else { //type=0 gpu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "BENCHMARK" { + setMapKey("GpuBenchMarkJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + + return resultMap, resultItemMap +} +func setMapKey(key string, userId int64, value int, resultItemMap map[string]int) { + newKey := fmt.Sprint(userId) + "_" + key + if _, ok := resultItemMap[newKey]; !ok { + resultItemMap[newKey] = value + } else { + resultItemMap[newKey] += value + } +} + func subMonth(t1, t2 time.Time) (month int) { y1 := t1.Year() y2 := t2.Year() diff --git a/models/user_business_struct.go b/models/user_business_struct.go index c435c0b07..17d9f046f 100644 --- a/models/user_business_struct.go +++ b/models/user_business_struct.go @@ -44,6 +44,18 @@ type UserBusinessAnalysisCurrentYear struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLast30Day struct { @@ -88,6 +100,18 @@ type UserBusinessAnalysisLast30Day struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLastMonth struct { @@ -132,6 +156,18 @@ type UserBusinessAnalysisLastMonth struct { //user Name string `xorm:"NOT NULL"` DataDate 
string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentMonth struct { @@ -176,6 +212,18 @@ type UserBusinessAnalysisCurrentMonth struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentWeek struct { @@ -220,6 +268,18 @@ type UserBusinessAnalysisCurrentWeek struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisYesterday struct { @@ -264,4 +324,30 @@ type UserBusinessAnalysisYesterday struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` +} + +type UserAnalysisPara struct { + Key string `xorm:"NOT NULL"` + Value float64 `xorm:"NOT NULL DEFAULT 0"` +} + +type UserMetrics struct { + CountDate int64 `xorm:"pk"` + ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + TotalHasActivityUser int `xorm:"NOT NULL DEFAULT 0"` } diff --git a/modules/cron/tasks_basic.go b/modules/cron/tasks_basic.go index b9838e66f..b3a6c02a1 100755 --- a/modules/cron/tasks_basic.go +++ b/modules/cron/tasks_basic.go @@ -185,6 +185,17 @@ func registerHandleSummaryStatistic() { }) } +func registerHandleOrgStatistic() { + RegisterTaskFatal("handle_org_statistic", &BaseConfig{ + Enabled: true, + RunAtStart: false, + Schedule: "0 0 2 * * ?", + }, func(ctx context.Context, _ 
*models.User, _ Config) error { + models.UpdateOrgStatistics() + return nil + }) +} + func registerSyncCloudbrainStatus() { RegisterTaskFatal("sync_cloudbrain_status", &BaseConfig{ Enabled: true, @@ -215,4 +226,5 @@ func initBasicTasks() { registerHandleSummaryStatistic() registerSyncCloudbrainStatus() + registerHandleOrgStatistic() } diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go index 538fcfbd9..b1e7b269e 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -280,6 +280,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc RepoID: ctx.Repo.Repository.ID, JobID: jobResult.ID, JobName: jobName, + FlavorCode: flavor, DisplayJobName: displayJobName, JobType: string(models.JobTypeDebug), Type: models.TypeCloudBrainTwo, diff --git a/modules/setting/repository.go b/modules/setting/repository.go index 8af3eaaf4..dceb48f16 100644 --- a/modules/setting/repository.go +++ b/modules/setting/repository.go @@ -40,6 +40,7 @@ var ( DisabledRepoUnits []string DefaultRepoUnits []string PrefixArchiveFiles bool + RepoMaxSize int64 // Repository editor settings Editor struct { @@ -54,6 +55,7 @@ var ( AllowedTypes []string `delim:"|"` FileMaxSize int64 MaxFiles int + TotalMaxSize int64 } `ini:"-"` // Repository local settings @@ -104,6 +106,7 @@ var ( DisabledRepoUnits: []string{}, DefaultRepoUnits: []string{}, PrefixArchiveFiles: true, + RepoMaxSize: 1024, // Repository editor settings Editor: struct { @@ -121,12 +124,14 @@ var ( AllowedTypes []string `delim:"|"` FileMaxSize int64 MaxFiles int + TotalMaxSize int64 }{ Enabled: true, TempPath: "data/tmp/uploads", AllowedTypes: []string{}, - FileMaxSize: 3, - MaxFiles: 5, + FileMaxSize: 30, + MaxFiles: 10, + TotalMaxSize: 1024, }, // Repository local settings diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 0515a6af7..db108e6d6 100755 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -493,6 +493,11 @@ static.encyclopediascount=Encyclopedias Count static.createrepocount=Create Repo Count static.openiindex=OpenI Index static.registdate=Regist Date +static.CloudBrainTaskNum=CloudBrain Task Count +static.CloudBrainRunTime=CloudBrain Run Time +static.CommitDatasetNum=Commit Dataset Count +static.CommitModelCount=Commit Model Count +static.UserIndex=User Index static.countdate=Count Date static.all=All static.public.user_business_analysis_current_month=Current_Month @@ -911,7 +916,13 @@ language_other = Other datasets = Datasets datasets.desc = Enable Dataset cloudbrain_helper=Use GPU/NPU resources to open notebooks, model training tasks, etc. - +cloudbrain.exitinfo=Exit Information +cloudbrain.platform=Platform +cloudbrain.endtime=End Time +cloudbrain.runinfo=Task Runtime Information +cloudbrain.time.starttime=Start run time +cloudbrain.time.endtime=End run time +cloudbrain.datasetdownload=Dataset download url model_manager = Model model_noright=No right model_rename=Duplicate model name, please modify model name. 
@@ -1250,6 +1261,10 @@ editor.cannot_commit_to_protected_branch = Cannot commit to protected branch '%s editor.no_commit_to_branch = Unable to commit directly to branch because: editor.user_no_push_to_branch = User cannot push to branch editor.require_signed_commit = Branch requires a signed commit +editor.repo_too_large = Repository cannot exceed %d MB +editor.repo_file_invalid = Uploaded files are invalid +editor.upload_file_too_much = Cannot upload more than %d files at a time + commits.desc = Browse source code change history. commits.commits = Commits @@ -2192,6 +2207,16 @@ customize = Customize selected_project=Selected Projects fold = Fold unfold = Unfold +org_member = Member +org_members = Members +org_team = Team +org_teams = Teams +org_repository = Repository +org_repositories = Repositories + +star = Star Top 10 +member = Members Top 10 +active = Active Top 10 form.name_reserved = The organization name '%s' is reserved. form.name_pattern_not_allowed = The pattern '%s' is not allowed in an organization name. @@ -2847,6 +2872,8 @@ uploading = Uploading upload_complete = Uploading complete failed = Upload Failed enable_minio_support = Enable minio support to use the dataset service +max_file_tooltips= Upload a maximum of ? files at a time; each file must not exceed ? MB. +max_size_tooltips= You can upload a maximum of ? files at a time. The upload limit has been reached; please do not add more files. [notification] notifications = Notifications diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 314b1ecaa..84c735c10 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -496,6 +496,11 @@ static.encyclopediascount=百科页面贡献次数 static.createrepocount=创建项目数 static.openiindex=OpenI指数 static.registdate=用户注册时间 +static.CloudBrainTaskNum=云脑任务数 +static.CloudBrainRunTime=云脑运行时间(小时) +static.CommitDatasetNum=上传(提交)数据集文件数 +static.CommitModelCount=提交模型数 +static.UserIndex=用户指数 static.countdate=系统统计时间 static.all=所有 static.public.user_business_analysis_current_month=本月 @@ -959,7 +964,13 @@ cloudbrain_jobname_err=只能以小写字母或数字开头且只包含小写字 cloudbrain_query_fail=查询云脑任务失败。 cloudbrain.mirror_tag = 镜像标签 cloudbrain.mirror_description = 镜像描述 - +cloudbrain.exitinfo=退出信息 +cloudbrain.platform=平台 +cloudbrain.endtime=结束时间 +cloudbrain.runinfo=任务运行简况 +cloudbrain.time.starttime=开始运行时间 +cloudbrain.time.endtime=结束运行时间 +cloudbrain.datasetdownload=数据集下载地址 record_begintime_get_err=无法获取统计开始时间。 parameter_is_wrong=输入参数错误,请检查输入参数。 total_count_get_error=查询总页数失败。 @@ -1257,6 +1268,9 @@ editor.cannot_commit_to_protected_branch=不可以提交到受保护的分支 '% editor.no_commit_to_branch=无法直接提交分支,因为: editor.user_no_push_to_branch=用户不能推送到分支 editor.require_signed_commit=分支需要签名提交 +editor.repo_too_large = 代码仓总大小不能超过%dMB +editor.repo_file_invalid = 提交的文件非法 +editor.upload_file_too_much = 不能同时提交超过%d个文件 commits.desc=浏览代码修改历史 commits.commits=次代码提交 @@ -2197,6 +2211,16 @@ customize = 自定义 selected_project=精选项目 fold = 收起 unfold = 展开 +org_member = 成员 +org_members = 成员 +org_team = 团队 +org_teams = 团队 +org_repository = 项目 +org_repositories = 项目 + +star = 点赞榜 +member = 成员榜 +active = 活跃榜 form.name_reserved=组织名称 '%s' 是被保留的。 form.name_pattern_not_allowed=组织名称中不允许使用 "%s"。 @@ -2853,6 +2877,8 @@ uploading=正在上传 upload_complete=上传完成 failed=上传失败 enable_minio_support=启用minio支持以使用数据集服务 +max_file_tooltips=单次最多上传?个文件,每个文件不超过?
MB。 +max_size_tooltips=一次最多只能上传?个文件, 上传已达到上限,请勿再添加文件。 [notification] notifications=通知 diff --git a/public/home/search.js b/public/home/search.js index 70b5d4ef9..e23d27549 100644 --- a/public/home/search.js +++ b/public/home/search.js @@ -108,8 +108,9 @@ function searchItem(type,sortType){ currentSearchSortBy = sortBy[sortType]; currentSearchAscending = sortAscending[sortType]; OnlySearchLabel =false; - page(currentPage); + }else{ + emptySearch(); } } @@ -121,49 +122,31 @@ function search(){ if(!isEmpty(currentSearchKeyword)){ currentSearchKeyword = currentSearchKeyword.trim(); } - $('#searchForm').addClass("hiddenSearch"); - initPageInfo(); if(!isEmpty(currentSearchKeyword)){ - document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); - currentSearchSortBy = sortBy[10]; - currentSearchAscending = "false"; - OnlySearchLabel =false; - page(currentPage); - if(currentSearchTableName != "repository"){ - doSearch("repository",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "issue"){ - doSearch("issue",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "user"){ - doSearch("user",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "org"){ - doSearch("org",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "dataset"){ - doSearch("dataset",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "pr"){ - doSearch("pr",currentSearchKeyword,1,pageSize,true,"",false); - } + doSpecifySearch(currentSearchTableName,currentSearchKeyword,sortBy[10],"false"); }else{ - initDiv(false); - document.getElementById("find_id").innerHTML=getLabel(isZh,"search_empty"); - $('#find_title').html(""); - document.getElementById("sort_type").innerHTML=""; - document.getElementById("child_search_item").innerHTML=""; - document.getElementById("page_menu").innerHTML=""; - $('#repo_total').text(""); - $('#pr_total').text(""); - $('#issue_total').text(""); - $('#dataset_total').text(""); - $('#user_total').text(""); - $('#org_total').text(""); - setActivate(null); + emptySearch(); } } +function emptySearch(){ + initDiv(false); + initPageInfo(); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_empty"); + $('#find_title').html(""); + document.getElementById("sort_type").innerHTML=""; + document.getElementById("child_search_item").innerHTML=""; + document.getElementById("page_menu").innerHTML=""; + $('#repo_total').text(""); + $('#pr_total').text(""); + $('#issue_total').text(""); + $('#dataset_total').text(""); + $('#user_total').text(""); + $('#org_total').text(""); + setActivate(null); +} + function initDiv(isSearchLabel=false){ if(isSearchLabel){ document.getElementById("search_div").style.display="none"; @@ -174,7 +157,6 @@ function initDiv(isSearchLabel=false){ document.getElementById("user_item").style.display="none"; document.getElementById("org_item").style.display="none"; document.getElementById("find_id").innerHTML=""; - }else{ document.getElementById("search_div").style.display="block"; document.getElementById("search_label_div").style.display="none"; @@ -187,6 +169,39 @@ function initDiv(isSearchLabel=false){ } } +function doSpecifySearch(tableName,keyword,sortBy="",ascending="false"){ + initDiv(false); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); + currentSearchKeyword = keyword; + initPageInfo(); +
currentSearchTableName = tableName; + currentSearchSortBy = sortBy; + currentSearchAscending = ascending; + OnlySearchLabel =false; + + page(currentPage); + + if(currentSearchTableName != "repository"){ + doSearch("repository",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "issue"){ + doSearch("issue",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "user"){ + doSearch("user",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "org"){ + doSearch("org",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "dataset"){ + doSearch("dataset",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "pr"){ + doSearch("pr",currentSearchKeyword,1,pageSize,true,"",false); + } +} + function doSearchLabel(tableName,keyword,sortBy="",ascending="false"){ initDiv(true); //document.getElementById("search_div").style.display="none"; @@ -1272,8 +1287,17 @@ var zhCN={ sessionStorage.removeItem("searchLabel"); doSearchLabel(sessionStorage.getItem("tableName"),sessionStorage.getItem("keyword"),sessionStorage.getItem("sortBy"),sessionStorage.getItem("ascending")); }else{ - console.log("normal search...."); - search(); + var specifySearch = sessionStorage.getItem("specifySearch"); + if(specifySearch){ + sessionStorage.removeItem("specifySearch"); + console.log("search special keyword=...." + sessionStorage.getItem("keyword")); + document.getElementById("keyword_input").value = sessionStorage.getItem("keyword"); + doSpecifySearch(sessionStorage.getItem("tableName"),sessionStorage.getItem("keyword"),sessionStorage.getItem("sortBy"),sessionStorage.getItem("ascending")); + }else{ + console.log("normal search...."); + search(); + } + } } } diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 1868edcb5..9de65662f 100755 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -547,6 +547,8 @@ func RegisterRoutes(m *macaron.Macaron) { }) }, operationReq) + m.Get("/query_user_metrics", operationReq, repo_ext.QueryMetrics) + m.Get("/query_user_rank_list", operationReq, repo_ext.QueryRankingList) m.Get("/query_user_static_page", operationReq, repo_ext.QueryUserStaticDataPage) m.Get("/query_user_current_month", operationReq, repo_ext.QueryUserStaticCurrentMonth) m.Get("/query_user_current_week", operationReq, repo_ext.QueryUserStaticCurrentWeek) diff --git a/routers/home.go b/routers/home.go index c33d7a049..324bb1032 100755 --- a/routers/home.go +++ b/routers/home.go @@ -49,7 +49,7 @@ func Home(ctx *context.Context) { ctx.HTML(200, tplHome) } -func setRecommendURL(ctx *context.Context) { +func setRecommendURLOnly(ctx *context.Context) { addr := setting.RecommentRepoAddr[10:] start := strings.Index(addr, "/") end := strings.Index(addr, "raw") @@ -58,7 +58,10 @@ func setRecommendURL(ctx *context.Context) { } else { ctx.Data["RecommendURL"] = setting.RecommentRepoAddr } +} + func setRecommendURL(ctx *context.Context) { + setRecommendURLOnly(ctx) ctx.Data["page_title"] = ctx.Tr("home.page_title") ctx.Data["page_small_title"] = ctx.Tr("home.page_small_title") ctx.Data["page_description"] = ctx.Tr("home.page_description") @@ -441,17 +444,39 @@ func ExploreOrganizations(ctx *context.Context) { ctx.Data["PageIsExploreOrganizations"] = true ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled - visibleTypes := []structs.VisibleType{structs.VisibleTypePublic} - if ctx.User != nil { - visibleTypes = append(visibleTypes, structs.VisibleTypeLimited,
structs.VisibleTypePrivate) + N := 10 + starInfo, err := models.FindTopNStarsOrgs(N) + if err != nil { + log.Error("GetStarOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetStarOrgInfos", err) + return + } + memberInfo, err := models.FindTopNMembersOrgs(N) + if err != nil { + log.Error("GetMemberOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetMemberOrgInfos", err) + return + } + openIInfo, err := models.FindTopNOpenIOrgs(N) + if err != nil { + log.Error("GetOpenIOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetOpenIOrgInfos", err) + return } - RenderUserSearch(ctx, &models.SearchUserOptions{ - Actor: ctx.User, - Type: models.UserTypeOrganization, - ListOptions: models.ListOptions{PageSize: setting.UI.ExplorePagingNum}, - Visible: visibleTypes, - }, tplExploreOrganizations) + recommendOrgs, err := GetRecommendOrg() + if err != nil { + log.Error("GetRecommendOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetRecommendOrgInfos", err) + return + } + setRecommendURLOnly(ctx) + ctx.Data["RecommendOrgs"] = recommendOrgs + ctx.Data["StarOrgs"] = starInfo + ctx.Data["MemberOrgs"] = memberInfo + ctx.Data["ActiveOrgs"] = openIInfo + + ctx.HTML(http.StatusOK, tplExploreOrganizations) } // ExploreCode render explore code page @@ -583,12 +608,12 @@ func NotFound(ctx *context.Context) { ctx.NotFound("home.NotFound", nil) } -func RecommendOrgFromPromote(ctx *context.Context) { +func GetRecommendOrg() ([]map[string]interface{}, error) { url := setting.RecommentRepoAddr + "organizations" result, err := repository.RecommendFromPromote(url) + if err != nil { - ctx.ServerError("500", err) - return + return nil, err } resultOrg := make([]map[string]interface{}, 0) for _, userName := range result { @@ -598,6 +623,7 @@ func RecommendOrgFromPromote(ctx *context.Context) { userMap["Name"] = user.Name userMap["Description"] = user.Description userMap["FullName"] = user.FullName + userMap["HomeLink"] = user.HomeLink() userMap["ID"] = user.ID userMap["Avatar"] = user.RelAvatarLink() userMap["NumRepos"] = user.NumRepos @@ -608,7 +634,15 @@ func RecommendOrgFromPromote(ctx *context.Context) { log.Info("query user error," + err.Error()) } } + return resultOrg, nil +} +func RecommendOrgFromPromote(ctx *context.Context) { + resultOrg, err := GetRecommendOrg() + if err != nil { + ctx.ServerError("500", err) + return + } ctx.JSON(200, resultOrg) } diff --git a/routers/private/internal.go b/routers/private/internal.go index d80a706cc..ace25c809 100755 --- a/routers/private/internal.go +++ b/routers/private/internal.go @@ -45,6 +45,8 @@ func RegisterRoutes(m *macaron.Macaron) { m.Post("/manager/flush-queues", bind(private.FlushOptions{}), FlushQueues) m.Post("/tool/update_all_repo_commit_cnt", UpdateAllRepoCommitCnt) m.Post("/tool/repo_stat/:date", RepoStatisticManually) + + m.Get("/tool/org_stat", OrgStatisticManually) m.Post("/tool/update_repo_visit/:date", UpdateRepoVisit) m.Post("/task/history_handle/duration", repo.HandleTaskWithNoDuration) diff --git a/routers/private/tool.go b/routers/private/tool.go index d01c5b2ab..122a41afe 100755 --- a/routers/private/tool.go +++ b/routers/private/tool.go @@ -45,6 +45,10 @@ func RepoStatisticManually(ctx *macaron.Context) { repo.TimingCountDataByDate(date) } +func OrgStatisticManually() { + models.UpdateOrgStatistics() +} + func UpdateRepoVisit(ctx *macaron.Context) { date := ctx.Params("date") log.Info("date(%s)", date) diff --git a/routers/repo/cloudbrain.go 
b/routers/repo/cloudbrain.go index 898f3844f..0d007a27d 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -2,11 +2,9 @@ package repo import ( "bufio" - "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" "fmt" - "github.com/unknwon/i18n" "io" "net/http" "os" @@ -16,6 +14,9 @@ import ( "strings" "time" + "code.gitea.io/gitea/modules/timeutil" + "github.com/unknwon/i18n" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -345,6 +346,24 @@ func CloudBrainRestart(ctx *context.Context) { break } + var hasSameResource bool + if gpuInfos == nil { + json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) + } + for _, resourceType := range gpuInfos.GpuInfo { + if resourceType.Queue == task.GpuQueue { + hasSameResource = true + break + } + } + + if !hasSameResource { + log.Error("no matching resource found, cannot restart", ctx.Data["MsgID"]) + resultCode = "-1" + errorMsg = "the job's version is too old and cannot be restarted" + break + } + count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, string(models.JobTypeDebug)) if err != nil { log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"]) @@ -400,18 +419,34 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo if jobType == models.JobTypeTrain { task, err = models.GetCloudbrainByJobID(ctx.Params(":jobid")) } else { - task, err = models.GetCloudbrainByID(ctx.Params(":id")) + task, err = models.GetCloudbrainByIDWithDeleted(ctx.Params(":id")) } if err != nil { log.Info("error:" + err.Error()) ctx.Data["error"] = err.Error() + return } + result, err := cloudbrain.GetJob(task.JobID) if err != nil { log.Info("error:" + err.Error()) ctx.Data["error"] = err.Error() + return } + + if cloudbrain.ResourceSpecs == nil { + json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) + } + for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec { + if tmp.Id == task.ResourceSpecId { + ctx.Data["GpuNum"] = tmp.GpuNum + ctx.Data["CpuNum"] = tmp.CpuNum + ctx.Data["MemMiB"] = tmp.MemMiB + ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB + } + } + if result != nil { jobRes, _ := models.ConvertToJobResultPayload(result.Payload) jobRes.Resource.Memory = strings.ReplaceAll(jobRes.Resource.Memory, "Mi", "MB") @@ -426,6 +461,15 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo ctx.Data["resource_type"] = resourceType.Value } } + } else { + if gpuInfos == nil { + json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) + } + for _, resourceType := range gpuInfos.GpuInfo { + if resourceType.Queue == jobRes.Config.GpuType { + ctx.Data["resource_type"] = resourceType.Value + } + } } taskRoles := jobRes.TaskRoles if jobRes.JobStatus.State != string(models.JobFailed) { @@ -436,9 +480,15 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo task.ContainerID = taskRes.TaskStatuses[0].ContainerID task.ContainerIp = taskRes.TaskStatuses[0].ContainerIP models.ParseAndSetDurationFromCloudBrainOne(jobRes, task) - err = models.UpdateJob(task) - if err != nil { - ctx.Data["error"] = err.Error() + + if task.DeletedAt.IsZero() { //normal record + err = models.UpdateJob(task) + if err != nil { + ctx.Data["error"] = err.Error() + return + } + } else { //deleted record + } } else { task.Status = jobRes.JobStatus.State @@ -455,7 +505,9 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo ctx.Data["result"] = jobRes } else { log.Info("error:" +
err.Error()) + return } + user, err := models.GetUserByID(task.UserID) if err == nil { task.User = user @@ -510,6 +562,12 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo } } + attachment, err := models.GetAttachmentByUUID(task.Uuid) + if err == nil { + ctx.Data["datasetname"] = attachment.Name + } else { + ctx.Data["datasetname"] = "" + } ctx.Data["task"] = task ctx.Data["jobName"] = task.JobName @@ -518,7 +576,10 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo version_list_task = append(version_list_task, task) ctx.Data["version_list_task"] = version_list_task ctx.Data["debugListType"] = debugListType - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, task) + ctx.Data["code_path"] = cloudbrain.CodeMountPath + ctx.Data["dataset_path"] = cloudbrain.DataSetMountPath + ctx.Data["model_path"] = cloudbrain.ModelMountPath + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) ctx.HTML(200, tpName) } diff --git a/routers/repo/editor.go b/routers/repo/editor.go index 2fa7976e0..8e13735df 100644 --- a/routers/repo/editor.go +++ b/routers/repo/editor.go @@ -5,6 +5,7 @@ package repo import ( + repo_service "code.gitea.io/gitea/services/repository" "encoding/json" "fmt" "io/ioutil" @@ -614,6 +615,19 @@ func UploadFilePost(ctx *context.Context, form auth.UploadRepoFileForm) { message += "\n\n" + form.CommitMessage } + if err := repo_service.CheckPushSizeLimit4Web(ctx.Repo.Repository, form.Files); err != nil { + if repo_service.IsRepoTooLargeErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.repo_too_large", setting.Repository.RepoMaxSize), tplUploadFile, &form) + } else if repo_service.IsUploadFileInvalidErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.repo_file_invalid"), tplUploadFile, &form) + } else if repo_service.IsUploadFileTooMuchErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.upload_file_too_much", setting.Repository.Upload.MaxFiles), tplUploadFile, &form) + } else { + ctx.RenderWithErr(err.Error(), tplUploadFile, &form) + } + return + } + if err := repofiles.UploadRepoFiles(ctx.Repo.Repository, ctx.User, &repofiles.UploadRepoFileOptions{ LastCommitID: ctx.Repo.CommitID, OldBranch: oldBranchName, diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 32d9db9ce..933571a0b 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -2,10 +2,9 @@ package repo import ( "archive/zip" - "code.gitea.io/gitea/modules/notification" - "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" + "fmt" "io" "io/ioutil" "net/http" @@ -16,6 +15,9 @@ import ( "time" "unicode/utf8" + "code.gitea.io/gitea/modules/notification" + "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -247,7 +249,7 @@ func NotebookShow(ctx *context.Context) { debugListType := ctx.Query("debugListType") var ID = ctx.Params(":id") - task, err := models.GetCloudbrainByID(ID) + task, err := models.GetCloudbrainByIDWithDeleted(ID) if err != nil { ctx.Data["error"] = err.Error() ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) @@ -262,16 +264,19 @@ func NotebookShow(ctx *context.Context) { } if result != nil { - task.Status = result.Status - err = models.UpdateJob(task) - if err != nil { - ctx.Data["error"] = err.Error() - ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) - return - } + if task.DeletedAt.IsZero() { //normal record + if task.Status != result.Status { + task.Status = result.Status + 
err = models.UpdateJob(task) + if err != nil { + ctx.Data["error"] = err.Error() + ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) + return + } + } + } else { //deleted record - result.CreateTime = time.Unix(int64(result.CreateAt/1000), 0).Format("2006-01-02 15:04:05") - result.LatestUpdateTime = time.Unix(int64(result.UpdateAt/1000), 0).Format("2006-01-02 15:04:05") + } } datasetDownloadLink := "-" @@ -279,16 +284,44 @@ func NotebookShow(ctx *context.Context) { if task.Uuid != "" && task.UserID == ctx.User.ID { attachment, err := models.GetAttachmentByUUID(task.Uuid) if err == nil { + task.DatasetName = attachment.Name datasetDownloadLink = attachment.S3DownloadURL() } } } - + user, err := models.GetUserByID(task.UserID) + if err == nil { + task.User = user + } + if modelarts.FlavorInfos == nil { + json.Unmarshal([]byte(setting.FlavorInfos), &modelarts.FlavorInfos) + } + if modelarts.FlavorInfos != nil { + ctx.Data["resource_spec"] = modelarts.FlavorInfos.FlavorInfo[0].Desc + for _, f := range modelarts.FlavorInfos.FlavorInfo { + if fmt.Sprint(f.Value) == task.FlavorCode { + ctx.Data["resource_spec"] = f.Desc + break + } + } + } + if task.TrainJobDuration == "" { + if task.Duration == 0 { + var duration int64 + if task.Status == string(models.JobRunning) { + duration = time.Now().Unix() - int64(task.CreatedUnix) + } else { + duration = int64(task.UpdatedUnix) - int64(task.CreatedUnix) + } + task.Duration = duration + } + task.TrainJobDuration = models.ConvertDurationToStr(task.Duration) + } + ctx.Data["duration"] = task.TrainJobDuration ctx.Data["datasetDownloadLink"] = datasetDownloadLink ctx.Data["task"] = task ctx.Data["ID"] = ID ctx.Data["jobName"] = task.JobName - ctx.Data["result"] = result ctx.Data["debugListType"] = debugListType ctx.HTML(200, tplModelArtsNotebookShow) } @@ -1553,7 +1586,7 @@ func TrainJobShow(ctx *context.Context) { ctx.Data["displayJobName"] = VersionListTasks[0].DisplayJobName ctx.Data["version_list_task"] = VersionListTasks ctx.Data["version_list_count"] = VersionListCount - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, &VersionListTasks[0].Cloudbrain) + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, &VersionListTasks[0].Cloudbrain) ctx.HTML(http.StatusOK, tplModelArtsTrainJobShow) } @@ -2184,7 +2217,7 @@ func InferenceJobShow(ctx *context.Context) { ctx.Data["jobName"] = task.JobName ctx.Data["displayJobName"] = task.DisplayJobName ctx.Data["task"] = task - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, task) + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) tempUids := []int64{} tempUids = append(tempUids, task.UserID) diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go index b4adfc347..9d906270f 100755 --- a/routers/repo/user_data_analysis.go +++ b/routers/repo/user_data_analysis.go @@ -54,7 +54,12 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac "N1": ctx.Tr("user.static.createrepocount"), "O1": ctx.Tr("user.static.openiindex"), "P1": ctx.Tr("user.static.registdate"), - "Q1": ctx.Tr("user.static.countdate"), + "Q1": ctx.Tr("user.static.CloudBrainTaskNum"), + "R1": ctx.Tr("user.static.CloudBrainRunTime"), + "S1": ctx.Tr("user.static.CommitDatasetNum"), + "T1": ctx.Tr("user.static.CommitModelCount"), + "U1": ctx.Tr("user.static.UserIndex"), + "V1": ctx.Tr("user.static.countdate"), } for k, v := range dataHeader { //设置单元格的值 @@ -89,8 +94,14 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac formatTime := 
userRecord.RegistDate.Format("2006-01-02 15:04:05") xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3]) + xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum) + xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600)) + xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum) + xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount) + xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex)) + formatTime = userRecord.DataDate - xlsx.SetCellValue(sheetName, "Q"+rows, formatTime) + xlsx.SetCellValue(sheetName, "V"+rows, formatTime) } indexTotal += PAGE_SIZE @@ -115,6 +126,30 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac } } +func QueryMetrics(ctx *context.Context) { + startDate := ctx.Query("startDate") + endDate := ctx.Query("endDate") + startTime, _ := time.ParseInLocation("2006-01-02", startDate, time.Local) + endTime, _ := time.ParseInLocation("2006-01-02", endDate, time.Local) + result, count := models.QueryMetrics(startTime.Unix(), endTime.Unix()) + mapInterface := make(map[string]interface{}) + mapInterface["data"] = result + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) +} + +func QueryRankingList(ctx *context.Context) { + key := ctx.Query("key") + tableName := ctx.Query("tableName") + limit := ctx.QueryInt("limit") + + result, count := models.QueryRankList(key, tableName, limit) + mapInterface := make(map[string]interface{}) + mapInterface["data"] = result + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) +} + func QueryUserStaticCurrentMonth(ctx *context.Context) { queryUserDataPage(ctx, "public.user_business_analysis_current_month", new(models.UserBusinessAnalysisCurrentMonth)) } @@ -221,7 +256,12 @@ func QueryUserStaticDataPage(ctx *context.Context) { "N1": ctx.Tr("user.static.createrepocount"), "O1": ctx.Tr("user.static.openiindex"), "P1": ctx.Tr("user.static.registdate"), - "Q1": ctx.Tr("user.static.countdate"), + "Q1": ctx.Tr("user.static.CloudBrainTaskNum"), + "R1": ctx.Tr("user.static.CloudBrainRunTime"), + "S1": ctx.Tr("user.static.CommitDatasetNum"), + "T1": ctx.Tr("user.static.CommitModelCount"), + "U1": ctx.Tr("user.static.UserIndex"), + "V1": ctx.Tr("user.static.countdate"), } for k, v := range dataHeader { //设置单元格的值 @@ -249,9 +289,13 @@ func QueryUserStaticDataPage(ctx *context.Context) { formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05") xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3]) - + xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum) + xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600)) + xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum) + xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount) + xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex)) formatTime = userRecord.DataDate - xlsx.SetCellValue(sheetName, "Q"+rows, formatTime+" 00:01") + xlsx.SetCellValue(sheetName, "V"+rows, formatTime) } //设置默认打开的表单 diff --git a/routers/repo/view.go b/routers/repo/view.go index 320102ba4..b28e21aa1 100755 --- a/routers/repo/view.go +++ b/routers/repo/view.go @@ -247,7 +247,11 @@ func renderDirectory(ctx *context.Context, treeLink string) { ctx.Data["ReadmeInList"] = true ctx.Data["ReadmeExist"] = true ctx.Data["FileIsSymlink"] = readmeFile.isSymlink - ctx.Data["ReadmeName"] = readmeFile.name + if 
ctx.Repo.TreePath == "" { + ctx.Data["ReadmeRelativePath"] = readmeFile.name + } else { + ctx.Data["ReadmeRelativePath"] = ctx.Repo.TreePath + "/" + readmeFile.name + } if ctx.Repo.CanEnableEditor() { ctx.Data["CanEditFile"] = true @@ -579,11 +583,11 @@ func safeURL(address string) string { } type ContributorInfo struct { - UserInfo *models.User // nil for contributor who is not a registered user - RelAvatarLink string `json:"rel_avatar_link"` - UserName string `json:"user_name"` - Email string `json:"email"` - CommitCnt int `json:"commit_cnt"` + UserInfo *models.User // nil for contributor who is not a registered user + RelAvatarLink string `json:"rel_avatar_link"` + UserName string `json:"user_name"` + Email string `json:"email"` + CommitCnt int `json:"commit_cnt"` } type GetContributorsInfo struct { @@ -642,7 +646,7 @@ func Home(ctx *context.Context) { existedContributorInfo.CommitCnt += c.CommitCnt } else { var newContributor = &ContributorInfo{ - user, "", "",c.Email, c.CommitCnt, + user, "", "", c.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) @@ -839,7 +843,7 @@ func renderCode(ctx *context.Context) { compareInfo, err = baseGitRepo.GetCompareInfo(ctx.Repo.Repository.RepoPath(), ctx.Repo.BranchName, ctx.Repo.Repository.BaseRepo.DefaultBranch) ctx.Data["UpstreamSameBranchName"] = false } - if err==nil && compareInfo != nil { + if err == nil && compareInfo != nil { if compareInfo.Commits != nil { log.Info("compareInfoCommits数量:%d", compareInfo.Commits.Len()) ctx.Data["FetchUpstreamCnt"] = compareInfo.Commits.Len() @@ -950,7 +954,7 @@ func ContributorsAPI(ctx *context.Context) { } else { // new committer info var newContributor = &ContributorInfo{ - user, user.RelAvatarLink(),user.Name, user.Email,c.CommitCnt, + user, user.RelAvatarLink(), user.Name, user.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) @@ -963,7 +967,7 @@ func ContributorsAPI(ctx *context.Context) { existedContributorInfo.CommitCnt += c.CommitCnt } else { var newContributor = &ContributorInfo{ - user, "", "",c.Email,c.CommitCnt, + user, "", "", c.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) diff --git a/routers/search.go b/routers/search.go index bc1bc5fac..c5655b9e1 100644 --- a/routers/search.go +++ b/routers/search.go @@ -573,7 +573,8 @@ func trimFontHtml(text []rune) string { startRune := rune('<') endRune := rune('>') count := 0 - for i := 0; i < len(text); i++ { + i := 0 + for ; i < len(text); i++ { if text[i] == startRune { //start < re := false j := i + 1 @@ -592,11 +593,14 @@ func trimFontHtml(text []rune) string { } else { return string(text[0:i]) } - } } } - return string(text) + if count%2 == 1 { + return string(text[0:i]) + "</font>" + } else { + return string(text[0:i]) + } } func trimHrefHtml(result string) string { @@ -1125,7 +1129,7 @@ func makePrivateIssueOrPr(issues []*models.Issue, res *SearchRes, Key string, la record["num_comments"] = issue.NumComments record["is_closed"] = issue.IsClosed record["updated_unix"] = issue.UpdatedUnix - record["updated_html"] = timeutil.TimeSinceUnix(repo.UpdatedUnix, language) + record["updated_html"] = timeutil.TimeSinceUnix(issue.UpdatedUnix, language) res.Result = append(res.Result, record) } } diff --git a/services/repository/repository.go b/services/repository/repository.go index cea16516a..d0cd52653 100644 --- a/services/repository/repository.go +++ b/services/repository/repository.go @@ -8,6 +8,7 @@ import ( "fmt" "io/ioutil" "net/http" +
"os" "strings" "code.gitea.io/gitea/models" @@ -172,3 +173,137 @@ func RecommendFromPromote(url string) ([]string, error) { } return result, nil } + +func CheckPushSizeLimit4Web(repo *models.Repository, fileIds []string) error { + if err := CheckRepoNumOnceLimit(len(fileIds)); err != nil { + return err + } + totalSize, err := CountUploadFileSizeByIds(fileIds) + if err != nil { + return UploadFileInvalidErr{} + } + if err := CheckRepoTotalSizeLimit(repo, totalSize); err != nil { + return err + } + return nil +} + +func CheckPushSizeLimit4Http(repo *models.Repository, uploadFileSize int64) error { + if err := CheckRepoOnceTotalSizeLimit(uploadFileSize); err != nil { + return err + } + if err := CheckRepoTotalSizeLimit(repo, uploadFileSize); err != nil { + return err + } + return nil +} + +func CheckRepoTotalSizeLimit(repo *models.Repository, uploadFileSize int64) error { + if repo.Size+uploadFileSize > setting.Repository.RepoMaxSize*1024*1024 { + return RepoTooLargeErr{} + } + return nil +} + +func CheckRepoOnceTotalSizeLimit(uploadFileSize int64) error { + if uploadFileSize > setting.Repository.Upload.TotalMaxSize*1024*1024 { + return UploadFileTooLargeErr{} + } + return nil +} + +func CheckRepoNumOnceLimit(uploadFileNum int) error { + if uploadFileNum > setting.Repository.Upload.MaxFiles { + return UploadFileTooMuchErr{} + } + return nil +} + +func CountUploadFileSizeByIds(fileIds []string) (int64, error) { + if len(fileIds) == 0 { + return 0, nil + } + uploads, err := models.GetUploadsByUUIDs(fileIds) + if err != nil { + return 0, fmt.Errorf("CountUploadFileSizeByIds error [uuids: %v]: %v", fileIds, err) + } + var totalSize int64 + for _, upload := range uploads { + size, err := GetUploadFileSize(upload) + if err != nil { + return 0, err + } + totalSize += size + } + return totalSize, nil +} + +func GetUploadFileSize(upload *models.Upload) (int64, error) { + info, err := os.Lstat(upload.LocalPath()) + + if err != nil { + return 0, err + } + return info.Size(), nil + +} + +type RepoTooLargeErr struct { +} + +func (RepoTooLargeErr) Error() string { + return fmt.Sprintf("Repository can not exceed %d MB. 
Please remove some unnecessary files and try again", setting.Repository.RepoMaxSize) +} + +func IsRepoTooLargeErr(err error) bool { + _, ok := err.(RepoTooLargeErr) + return ok +} + +type UploadFileTooLargeErr struct { +} + +func (UploadFileTooLargeErr) Error() string { + return fmt.Sprintf("Upload files cannot exceed %d MB at a time", setting.Repository.Upload.TotalMaxSize) +} + +func IsUploadFileTooLargeErr(err error) bool { + _, ok := err.(UploadFileTooLargeErr) + return ok +} + +type RepoFileTooLargeErr struct { +} + +func (RepoFileTooLargeErr) Error() string { + return "repository file is too large" +} + +func IsRepoFileTooLargeErr(err error) bool { + _, ok := err.(RepoFileTooLargeErr) + return ok +} + +type UploadFileTooMuchErr struct { +} + +func (UploadFileTooMuchErr) Error() string { + return "too many files uploaded at a time" +} + +func IsUploadFileTooMuchErr(err error) bool { + _, ok := err.(UploadFileTooMuchErr) + return ok +} + +type UploadFileInvalidErr struct { +} + +func (UploadFileInvalidErr) Error() string { + return "upload files are invalid" +} + +func IsUploadFileInvalidErr(err error) bool { + _, ok := err.(UploadFileInvalidErr) + return ok +} diff --git a/templates/admin/cloudbrain/list.tmpl b/templates/admin/cloudbrain/list.tmpl old mode 100644 new mode 100755 index 39b2c21de..6fea2eef7 --- a/templates/admin/cloudbrain/list.tmpl +++ b/templates/admin/cloudbrain/list.tmpl @@ -81,7 +81,7 @@ {{.DisplayJobName}} {{else if eq .JobType "TRAIN"}} - + {{.DisplayJobName}} {{else if eq .JobType "BENCHMARK"}} @@ -155,13 +155,13 @@ {{else}} - + {{$.i18n.Tr "repo.stop"}} {{end}} -
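CheckPushSizeLimit4Web above is wired into UploadFilePost (routers/repo/editor.go), but no caller of CheckPushSizeLimit4Http appears in this patch. A minimal sketch of how an HTTP push handler might apply it, assuming a macaron context and an already-resolved repository; the handler name and the status-code mapping are illustrative assumptions, not part of this patch:

package repo

import (
	"net/http"

	"code.gitea.io/gitea/models"
	repo_service "code.gitea.io/gitea/services/repository"
	macaron "gopkg.in/macaron.v1"
)

// checkPushLimits rejects a push whose payload would exceed either the
// per-push cap (Upload.TotalMaxSize) or the whole-repository cap (RepoMaxSize).
// It reports whether the push may proceed.
func checkPushLimits(ctx *macaron.Context, repo *models.Repository, contentLength int64) bool {
	if err := repo_service.CheckPushSizeLimit4Http(repo, contentLength); err != nil {
		status := http.StatusInternalServerError
		if repo_service.IsRepoTooLargeErr(err) || repo_service.IsUploadFileTooLargeErr(err) {
			status = http.StatusRequestEntityTooLarge
		}
		ctx.Error(status, err.Error())
		return false
	}
	return true
}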