diff --git a/models/attachment.go b/models/attachment.go index a3fc6fa01..7c95a73dd 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -51,6 +51,7 @@ type Attachment struct { FileChunk *FileChunk `xorm:"-"` CanDel bool `xorm:"-"` Uploader *User `xorm:"-"` + Md5 string `xorm:"-"` } type AttachmentUsername struct { diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 1662dcd96..f1d136aaf 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -1,13 +1,14 @@ package models import ( - "code.gitea.io/gitea/modules/util" "encoding/json" "fmt" "strconv" "strings" "time" + "code.gitea.io/gitea/modules/util" + "xorm.io/builder" "xorm.io/xorm" @@ -111,7 +112,7 @@ type Cloudbrain struct { SubTaskName string ContainerID string ContainerIp string - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX"` UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` Duration int64 `xorm:"DEFAULT 0"` //运行时长 单位秒 TrainJobDuration string `xorm:"DEFAULT '00:00:00'"` @@ -184,6 +185,12 @@ func (task *Cloudbrain) ComputeAndSetDuration() { task.TrainJobDuration = ConvertDurationToStr(d) } +func (task *Cloudbrain) CorrectCreateUnix() { + if task.StartTime > 0 && task.CreatedUnix > task.StartTime { + task.CreatedUnix = task.StartTime + } +} + func (task *Cloudbrain) IsTerminal() bool { status := task.Status return status == string(ModelArtsTrainJobCompleted) || status == string(ModelArtsTrainJobFailed) || status == string(ModelArtsTrainJobKilled) || status == string(ModelArtsStopped) || status == string(JobStopped) || status == string(JobFailed) || status == string(JobSucceeded) @@ -218,6 +225,7 @@ func ParseAndSetDurationFromCloudBrainOne(result JobResultPayload, task *Cloudbr task.EndTime = timeutil.TimeStamp(result.JobStatus.CompletedTime / 1000) } } + task.CorrectCreateUnix() task.ComputeAndSetDuration() } @@ -388,7 +396,7 @@ type JobResultPayload struct { AppProgress string `json:"appProgress"` AppTrackingURL string 
`json:"appTrackingUrl"` AppLaunchedTime int64 `json:"appLaunchedTime"` - AppCompletedTime int64 `json:"appCompletedTime"` + AppCompletedTime interface{} `json:"appCompletedTime"` AppExitCode int `json:"appExitCode"` AppExitDiagnostics string `json:"appExitDiagnostics"` AppExitType interface{} `json:"appExitType"` @@ -1354,7 +1362,7 @@ func CloudbrainsVersionList(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int, e func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) { cloudbrain.TrainJobDuration = DURATION_STR_ZERO - if _, err = x.Insert(cloudbrain); err != nil { + if _, err = x.NoAutoTime().Insert(cloudbrain); err != nil { return err } return nil @@ -1370,6 +1378,16 @@ func getRepoCloudBrain(cb *Cloudbrain) (*Cloudbrain, error) { return cb, nil } +func getRepoCloudBrainWithDeleted(cb *Cloudbrain) (*Cloudbrain, error) { + has, err := x.Unscoped().Get(cb) + if err != nil { + return nil, err + } else if !has { + return nil, ErrJobNotExist{} + } + return cb, nil +} + func GetRepoCloudBrainByJobID(repoID int64, jobID string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, RepoID: repoID} return getRepoCloudBrain(cb) @@ -1386,6 +1404,12 @@ func GetCloudbrainByID(id string) (*Cloudbrain, error) { return getRepoCloudBrain(cb) } +func GetCloudbrainByIDWithDeleted(id string) (*Cloudbrain, error) { + idInt64, _ := strconv.ParseInt(id, 10, 64) + cb := &Cloudbrain{ID: idInt64} + return getRepoCloudBrainWithDeleted(cb) +} + func GetCloudbrainByJobIDAndVersionName(jobID string, versionName string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, VersionName: versionName} return getRepoCloudBrain(cb) @@ -1448,7 +1472,7 @@ func UpdateTrainJobVersion(job *Cloudbrain) error { func updateJobTrainVersion(e Engine, job *Cloudbrain) error { var sess *xorm.Session sess = e.Where("job_id = ? 
AND version_name=?", job.JobID, job.VersionName) - _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job) + _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job) return err } @@ -1537,7 +1561,7 @@ func UpdateInferenceJob(job *Cloudbrain) error { func updateInferenceJob(e Engine, job *Cloudbrain) error { var sess *xorm.Session sess = e.Where("job_id = ?", job.JobID) - _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job) + _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job) return err } func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) { @@ -1553,7 +1577,7 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) { return err } - if _, err = sess.Insert(new); err != nil { + if _, err = sess.NoAutoTime().Insert(new); err != nil { sess.Rollback() return err } @@ -1564,3 +1588,64 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) { return nil } +func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { + sess := x.NewSession() + defer sess.Close() + var cond = builder.NewCond() + if (opts.Type) >= 0 { + cond = cond.And( + builder.Eq{"cloudbrain.type": opts.Type}, + ) + } + + var count int64 + var err error + condition := "cloudbrain.user_id = `user`.id" + if len(opts.Keyword) == 0 { + count, err = sess.Where(cond).Count(new(Cloudbrain)) + } else { + lowerKeyWord := strings.ToLower(opts.Keyword) + + cond = cond.And(builder.Or(builder.Like{"LOWER(cloudbrain.job_name)", lowerKeyWord}, builder.Like{"LOWER(cloudbrain.display_job_name)", lowerKeyWord}, builder.Like{"`user`.lower_name", lowerKeyWord})) + count, err = sess.Table(&Cloudbrain{}).Where(cond). 
+ Join("left", "`user`", condition).Count(new(CloudbrainInfo)) + + } + + if err != nil { + return nil, 0, fmt.Errorf("Count: %v", err) + } + + if opts.Page >= 0 && opts.PageSize > 0 { + var start int + if opts.Page == 0 { + start = 0 + } else { + start = (opts.Page - 1) * opts.PageSize + } + sess.Limit(opts.PageSize, start) + } + + sess.OrderBy("cloudbrain.created_unix DESC") + cloudbrains := make([]*CloudbrainInfo, 0, setting.UI.IssuePagingNum) + if err := sess.Table(&Cloudbrain{}).Unscoped().Where(cond). + Join("left", "`user`", condition). + Find(&cloudbrains); err != nil { + return nil, 0, fmt.Errorf("Find: %v", err) + } + if opts.NeedRepoInfo { + var ids []int64 + for _, task := range cloudbrains { + ids = append(ids, task.RepoID) + } + repositoryMap, err := GetRepositoriesMapByIDs(ids) + if err == nil { + for _, task := range cloudbrains { + task.Repo = repositoryMap[task.RepoID] + } + } + + } + + return cloudbrains, count, nil +} diff --git a/models/dataset.go b/models/dataset.go index c0d82d250..95800100c 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -1,6 +1,7 @@ package models import ( + "code.gitea.io/gitea/modules/log" "errors" "fmt" "sort" @@ -62,19 +63,20 @@ func (datasets DatasetList) loadAttributes(e Engine) error { } set := make(map[int64]struct{}) + userIdSet := make(map[int64]struct{}) datasetIDs := make([]int64, len(datasets)) for i := range datasets { - set[datasets[i].UserID] = struct{}{} + userIdSet[datasets[i].UserID] = struct{}{} set[datasets[i].RepoID] = struct{}{} datasetIDs[i] = datasets[i].ID } // Load owners. - users := make(map[int64]*User, len(set)) + users := make(map[int64]*User, len(userIdSet)) repos := make(map[int64]*Repository, len(set)) if err := e. Where("id > 0"). - In("id", keysInt64(set)). + In("id", keysInt64(userIdSet)). 
Find(&users); err != nil { return fmt.Errorf("find users: %v", err) } @@ -139,20 +141,7 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { var cond = builder.NewCond() cond = cond.And(builder.Neq{"dataset.status": DatasetStatusDeleted}) - if len(opts.Keyword) > 0 { - cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword})) - } - - if len(opts.Category) > 0 { - cond = cond.And(builder.Eq{"dataset.category": opts.Category}) - } - - if len(opts.Task) > 0 { - cond = cond.And(builder.Eq{"dataset.task": opts.Task}) - } - if len(opts.License) > 0 { - cond = cond.And(builder.Eq{"dataset.license": opts.License}) - } + cond = generateFilterCond(opts, cond) if opts.RepoID > 0 { cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID}) @@ -162,14 +151,12 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic}) cond = cond.And(builder.Eq{"attachment.is_private": false}) if opts.OwnerID > 0 { - if len(opts.Keyword) == 0 { - cond = cond.Or(builder.Eq{"repository.owner_id": opts.OwnerID}) - } else { - subCon := builder.NewCond() - subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID}, builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword})) - cond = cond.Or(subCon) - - } + + subCon := builder.NewCond() + subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID}) + subCon = generateFilterCond(opts, subCon) + cond = cond.Or(subCon) + } } else if opts.OwnerID > 0 { cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID}) @@ -182,6 +169,25 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { return cond } +func generateFilterCond(opts *SearchDatasetOptions, cond builder.Cond) builder.Cond { + if len(opts.Keyword) > 0 { + cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, 
builder.Like{"dataset.description", opts.Keyword})) + } + + if len(opts.Category) > 0 { + cond = cond.And(builder.Eq{"dataset.category": opts.Category}) + } + + if len(opts.Task) > 0 { + cond = cond.And(builder.Eq{"dataset.task": opts.Task}) + } + if len(opts.License) > 0 { + cond = cond.And(builder.Eq{"dataset.license": opts.License}) + } + + return cond +} + func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (DatasetList, int64, error) { if opts.Page <= 0 { opts.Page = 1 @@ -292,7 +298,13 @@ func getDatasetAttachments(e Engine, typeCloudBrain int, isSigned bool, user *Us if err != nil { return err } - attachment.FileChunk = fileChunks[0] + if len(fileChunks) > 0 { + attachment.Md5 = fileChunks[0].Md5 + } else { + log.Error("has attachment record, but has no file_chunk record") + attachment.Md5 = "no_record" + } + attachment.CanDel = CanDelAttachment(isSigned, user, attachment) sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment) } diff --git a/models/dbsql/dataset_foreigntable_for_es.sql b/models/dbsql/dataset_foreigntable_for_es.sql index 815b89d02..02e5f0ddf 100644 --- a/models/dbsql/dataset_foreigntable_for_es.sql +++ b/models/dbsql/dataset_foreigntable_for_es.sql @@ -1,4 +1,17 @@ +DELETE FROM public.dataset_es; DROP FOREIGN TABLE public.dataset_es; +DROP TRIGGER IF EXISTS es_insert_dataset on public.dataset; +DROP FUNCTION public.insert_dataset_data(); +DROP TRIGGER IF EXISTS es_udpate_dataset_file_name on public.attachment; +DROP FUNCTION public.udpate_dataset_file_name; + +DROP TRIGGER IF EXISTS es_update_dataset on public.dataset; +DROP FUNCTION public.update_dataset; + +DROP TRIGGER IF EXISTS es_delete_dataset on public.dataset; +DROP FUNCTION public.delete_dataset; + + CREATE FOREIGN TABLE public.dataset_es ( id bigint NOT NULL, diff --git a/models/dbsql/issue_foreigntable_for_es.sql b/models/dbsql/issue_foreigntable_for_es.sql index bb5c1634e..d6a16cd27 100644 --- 
a/models/dbsql/issue_foreigntable_for_es.sql +++ b/models/dbsql/issue_foreigntable_for_es.sql @@ -1,4 +1,15 @@ +delete from public.issue_es; DROP FOREIGN TABLE public.issue_es; +DROP TRIGGER IF EXISTS es_insert_issue on public.issue; +DROP FUNCTION public.insert_issue_data; +DROP TRIGGER IF EXISTS es_udpate_issue_comment on public.comment; +DROP FUNCTION udpate_issue_comment; +DROP TRIGGER IF EXISTS es_update_issue on public.issue; +DROP FUNCTION public.update_issue; +DROP TRIGGER IF EXISTS es_delete_issue on public.issue; +DROP FUNCTION public.delete_issue; + + CREATE FOREIGN TABLE public.issue_es ( id bigint NOT NULL, @@ -182,6 +193,7 @@ $def$ name=NEW.name, is_closed=NEW.is_closed, num_comments=NEW.num_comments, + updated_unix=NEW.updated_unix, comment=(select array_to_string(array_agg(content order by created_unix desc),',') from public.comment where issue_id=NEW.id) where id=NEW.id; return new; diff --git a/models/dbsql/repo_foreigntable_for_es.sql b/models/dbsql/repo_foreigntable_for_es.sql index f51155ccf..7e06fd99e 100644 --- a/models/dbsql/repo_foreigntable_for_es.sql +++ b/models/dbsql/repo_foreigntable_for_es.sql @@ -1,5 +1,18 @@ -- 要处理项目从私有变为公有,并且从公有变成私有的情况 +DELETE FROM public.repository_es; DROP FOREIGN table if exists public.repository_es; +DROP TRIGGER IF EXISTS es_insert_repository on public.repository; +DROP FUNCTION public.insert_repository_data; +DROP TRIGGER IF EXISTS es_update_repository on public.repository; +DROP FUNCTION public.update_repository; + +DROP TRIGGER IF EXISTS es_delete_repository on public.repository; +DROP FUNCTION public.delete_repository; + +DROP TRIGGER IF EXISTS es_udpate_repository_lang on public.language_stat; +DROP FUNCTION public.udpate_repository_lang; + + CREATE FOREIGN TABLE public.repository_es ( id bigint NOT NULL, owner_id bigint, diff --git a/models/dbsql/user_foreigntable_for_es.sql b/models/dbsql/user_foreigntable_for_es.sql index c3d21b92a..5d77757f0 100644 --- a/models/dbsql/user_foreigntable_for_es.sql +++ 
b/models/dbsql/user_foreigntable_for_es.sql @@ -1,4 +1,13 @@ +DELETE FROM public.user_es; DROP FOREIGN table if exists public.user_es; +DROP TRIGGER IF EXISTS es_insert_user on public.user; +DROP FUNCTION public.insert_user_data; +DROP TRIGGER IF EXISTS es_update_user on public.user; +DROP FUNCTION public.update_user; + +DROP TRIGGER IF EXISTS es_delete_user on public.user; +DROP FUNCTION public.delete_user; + CREATE FOREIGN TABLE public.user_es ( id bigint NOT NULL , diff --git a/models/models.go b/models/models.go index 362d46618..2ec61941d 100755 --- a/models/models.go +++ b/models/models.go @@ -138,6 +138,7 @@ func init() { new(OfficialTag), new(OfficialTagRepos), new(WechatBindLog), + new(OrgStatistic), new(SearchRecord), ) @@ -153,6 +154,8 @@ func init() { new(UserBusinessAnalysisCurrentWeek), new(UserBusinessAnalysisYesterday), new(UserLoginLog), + new(UserMetrics), + new(UserAnalysisPara), ) gonicNames := []string{"SSL", "UID"} diff --git a/models/org.go b/models/org.go index 85fb157ae..2a6528023 100755 --- a/models/org.go +++ b/models/org.go @@ -8,6 +8,7 @@ package models import ( "fmt" "os" + "strconv" "strings" "code.gitea.io/gitea/modules/log" @@ -19,6 +20,17 @@ import ( "xorm.io/xorm" ) +type OrgStatistic struct { + ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"UNIQUE"` + NumScore int `xorm:"INDEX NOT NULL DEFAULT 0"` +} + +type OrgScore struct { + *User + Score string +} + // IsOwnedBy returns true if given user is in the owner team. 
func (org *User) IsOwnedBy(uid int64) (bool, error) { return IsOrganizationOwner(org.ID, uid) @@ -135,6 +147,93 @@ func (org *User) RemoveOrgRepo(repoID int64) error { return org.removeOrgRepo(x, repoID) } +func UpdateOrgStatistics() { + ids, err := GetOrganizationsId() + if err != nil { + return + } + for _, id := range ids { + org := User{ID: id} + orgStat := &OrgStatistic{OrgID: id} + numScore, err := org.getOrgStatistics() + if err == nil { + has, _ := x.Get(orgStat) + + orgStat.NumScore = numScore + if has { + x.ID(orgStat.ID).Cols("num_score").Update(&orgStat) + } else { + x.Insert(orgStat) + } + + } + } + +} + +func (org *User) getOrgStatistics() (int, error) { + count, err := getRepositoryCount(x, org) + if err != nil { + return 0, err + } + + err = org.GetRepositories(ListOptions{int(count), 1}) + + if err != nil { + return 0, err + } + var numScore = 0 + for _, repo := range org.Repos { + + numScore += int(getOpenIByRepoId(repo.ID)) + } + + return numScore, nil + +} + +func FindTopNStarsOrgs(n int) ([]*OrgScore, error) { + sql := "select a.id,sum(b.num_stars) score from \"user\" a ,repository b where a.id=b.owner_id and a.type=1 and a.visibility=0 group by a.id order by score desc limit " + strconv.Itoa(n) + + return findTopNOrgs(sql) +} +func FindTopNMembersOrgs(n int) ([]*OrgScore, error) { + sql := "select id, count(user_id) score from" + + " (select org_id as id, uid as user_id from org_user o, \"user\" u where o.org_id=u.id and u.visibility=0 " + + "union select a.id,b.user_id from \"user\" a,collaboration b,repository c " + + "where a.type=1 and a.visibility=0 and a.id=c.owner_id and b.repo_id=c.id) d " + + "group by id order by score desc limit " + strconv.Itoa(n) + + return findTopNOrgs(sql) +} + +func FindTopNOpenIOrgs(n int) ([]*OrgScore, error) { + sql := "select org_id id,num_score score from org_statistic a, \"user\" b where a.org_id=b.id and b.visibility=0 order by num_score desc limit " + strconv.Itoa(n) + + return findTopNOrgs(sql) +} + 
+func findTopNOrgs(sql string) ([]*OrgScore, error) { + resutls, err := x.QueryString(sql) + + if err != nil { + return nil, err + } + var orgScore []*OrgScore + for _, record := range resutls { + id, _ := strconv.ParseInt(record["id"], 10, 64) + user, err := getUserByID(x, id) + if err != nil { + continue + } + orgScore = append(orgScore, &OrgScore{user, record["score"]}) + + } + + return orgScore, nil + +} + // CreateOrganization creates record of a new organization. func CreateOrganization(org, owner *User) (err error) { if !owner.CanCreateOrganization() { diff --git a/models/repo_statistic.go b/models/repo_statistic.go index a9e9593af..4f8f13ed7 100755 --- a/models/repo_statistic.go +++ b/models/repo_statistic.go @@ -73,6 +73,16 @@ func (repo *RepoStatistic) DisplayName() string { return repo.Alias } +func getOpenIByRepoId(repoId int64) float64 { + repoStatistic := new(RepoStatistic) + has, err := xStatistic.Cols("radar_total").Where("repo_id=?", repoId).Desc("id").Limit(1).Get(repoStatistic) + if !has || err != nil { + return 0 + } + return repoStatistic.RadarTotal + +} + func DeleteRepoStatDaily(date string) error { sess := xStatistic.NewSession() defer sess.Close() diff --git a/models/user.go b/models/user.go index f72462051..71885aeb1 100755 --- a/models/user.go +++ b/models/user.go @@ -2104,6 +2104,12 @@ func GetOrganizationsCount() (int64, error) { } +func GetOrganizationsId() ([]int64, error) { + var ids []int64 + err := x.Table("user").Where("type=1").Cols("id").Find(&ids) + return ids, err +} + func GetBlockChainUnSuccessUsers() ([]*User, error) { users := make([]*User, 0, 10) err := x.Where("public_key = ''"). 
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go index 288762161..a3a68b1ab 100644 --- a/models/user_business_analysis.go +++ b/models/user_business_analysis.go @@ -6,7 +6,6 @@ import ( "strconv" "time" - "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" @@ -81,6 +80,19 @@ type UserBusinessAnalysisAll struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + //cloudbraintask + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysis struct { @@ -146,6 +158,18 @@ type UserBusinessAnalysis struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisQueryOptions struct { @@ -183,6 +207,29 @@ func getLastCountDate() int64 { return pageStartTime.Unix() } +func QueryMetrics(start int64, end int64) ([]*UserMetrics, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + userMetricsList := 
make([]*UserMetrics, 0) + if err := statictisSess.Table(new(UserMetrics)).Where("count_date >" + fmt.Sprint(start) + " and count_date<" + fmt.Sprint(end)).OrderBy("count_date desc"). + Find(&userMetricsList); err != nil { + return nil, 0 + } + return userMetricsList, int64(len(userMetricsList)) +} + +func QueryRankList(key string, tableName string, limit int) ([]*UserBusinessAnalysisAll, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) + if err := statictisSess.Table(tableName).OrderBy(key+" desc,id desc").Limit(limit, 0). + Find(&userBusinessAnalysisAllList); err != nil { + return nil, 0 + } + return userBusinessAnalysisAllList, int64(len(userBusinessAnalysisAllList)) +} + func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string) ([]*UserBusinessAnalysisAll, int64) { statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -199,7 +246,7 @@ func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, q } log.Info("query return total:" + fmt.Sprint(allCount)) userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) - if err := statictisSess.Table(tableName).Where(cond).OrderBy("commit_count desc,id desc").Limit(pageSize, start). + if err := statictisSess.Table(tableName).Where(cond).OrderBy("user_index desc,id desc").Limit(pageSize, start). 
Find(&userBusinessAnalysisAllList); err != nil { return nil, 0 } @@ -334,6 +381,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus resultMap[userRecord.ID].WatchedCount += userRecord.WatchedCount resultMap[userRecord.ID].CommitCodeSize += userRecord.CommitCodeSize resultMap[userRecord.ID].CommitDatasetSize += userRecord.CommitDatasetSize + resultMap[userRecord.ID].CommitDatasetNum += userRecord.CommitDatasetNum resultMap[userRecord.ID].CommitModelCount += userRecord.CommitModelCount resultMap[userRecord.ID].SolveIssueCount += userRecord.SolveIssueCount resultMap[userRecord.ID].EncyclopediasCount += userRecord.EncyclopediasCount @@ -353,7 +401,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus return userBusinessAnalysisReturnList, count } -func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats, tableName string, pageStartTime time.Time, pageEndTime time.Time) { +func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageStartTime time.Time, pageEndTime time.Time, userMetrics map[string]int) { sess := x.NewSession() defer sess.Close() @@ -379,14 +427,15 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s FocusRepoCountMap := queryWatch(start_unix, end_unix) StarRepoCountMap := queryStar(start_unix, end_unix) WatchedCountMap := queryFollow(start_unix, end_unix) - - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitCodeSizeMap := queryCommitCodeSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, 
end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) DataDate := currentTimeNow.Format("2006-01-02") + " 00:01" cond := "type != 1 and is_active=true" @@ -395,9 +444,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s log.Info("query user error. return.") return } + ParaWeight := getParaWeight() var indexTotal int64 indexTotal = 0 insertCount := 0 + userIndexMap := make(map[int64]float64, 0) + maxUserIndex := 0.0 + minUserIndex := 100000000.0 dateRecordBatch := make([]UserBusinessAnalysisAll, 0) for { sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) @@ -412,84 +465,22 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.Name = userRecord.Name dateRecordAll.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecordAll.DataDate = DataDate - - if _, ok := CodeMergeCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CodeMergeCount = 0 - } else { - dateRecordAll.CodeMergeCount = CodeMergeCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitCount = 0 - } else { - dateRecordAll.CommitCount = CommitCountMap[dateRecordAll.ID] - } - - if _, ok := IssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.IssueCount = 0 - } else { - dateRecordAll.IssueCount = IssueCountMap[dateRecordAll.ID] - } - - if _, ok := CommentCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommentCount = 0 - } else { - dateRecordAll.CommentCount = CommentCountMap[dateRecordAll.ID] - } - - if _, ok := FocusRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.FocusRepoCount = 0 - } else { - dateRecordAll.FocusRepoCount = FocusRepoCountMap[dateRecordAll.ID] - } - - if _, ok := StarRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.StarRepoCount = 0 - } else { - dateRecordAll.StarRepoCount = StarRepoCountMap[dateRecordAll.ID] - } - - if _, ok := 
WatchedCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.WatchedCount = 0 - } else { - dateRecordAll.WatchedCount = WatchedCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCodeSizeMap[dateRecordAll.Email]; !ok { - dateRecordAll.CommitCodeSize = 0 - } else { - dateRecordAll.CommitCodeSize = int(CommitCodeSizeMap[dateRecordAll.Email].CommitLines) - } - - if _, ok := CommitDatasetSizeMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitDatasetSize = 0 - } else { - dateRecordAll.CommitDatasetSize = CommitDatasetSizeMap[dateRecordAll.ID] - } - - if _, ok := SolveIssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.SolveIssueCount = 0 - } else { - dateRecordAll.SolveIssueCount = SolveIssueCountMap[dateRecordAll.ID] - } - - if _, ok := wikiCountMap[dateRecordAll.Name]; !ok { - dateRecordAll.EncyclopediasCount = 0 - } else { - dateRecordAll.EncyclopediasCount = wikiCountMap[dateRecordAll.Name] - } - - if _, ok := CreateRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CreateRepoCount = 0 - } else { - dateRecordAll.CreateRepoCount = CreateRepoCountMap[dateRecordAll.ID] - } - - if _, ok := LoginCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.LoginCount = 0 - } else { - dateRecordAll.LoginCount = LoginCountMap[dateRecordAll.ID] - } + dateRecordAll.UserLocation = userRecord.Location + + dateRecordAll.CodeMergeCount = getMapValue(dateRecordAll.ID, CodeMergeCountMap) + dateRecordAll.CommitCount = getMapValue(dateRecordAll.ID, CommitCountMap) + dateRecordAll.IssueCount = getMapValue(dateRecordAll.ID, IssueCountMap) + dateRecordAll.CommentCount = getMapValue(dateRecordAll.ID, CommentCountMap) + dateRecordAll.FocusRepoCount = getMapValue(dateRecordAll.ID, FocusRepoCountMap) + dateRecordAll.StarRepoCount = getMapValue(dateRecordAll.ID, StarRepoCountMap) + dateRecordAll.WatchedCount = getMapValue(dateRecordAll.ID, WatchedCountMap) + dateRecordAll.CommitCodeSize = getMapValue(dateRecordAll.ID, CommitCodeSizeMap) + dateRecordAll.CommitDatasetSize = 
getMapValue(dateRecordAll.ID, CommitDatasetSizeMap) + dateRecordAll.CommitDatasetNum = getMapValue(dateRecordAll.ID, CommitDatasetNumMap) + dateRecordAll.SolveIssueCount = getMapValue(dateRecordAll.ID, SolveIssueCountMap) + dateRecordAll.EncyclopediasCount = getMapKeyStringValue(dateRecordAll.Name, wikiCountMap) + dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap) + dateRecordAll.LoginCount = getMapValue(dateRecordAll.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecordAll.ID]; !ok { dateRecordAll.OpenIIndex = 0 @@ -497,8 +488,22 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.OpenIIndex = OpenIIndexMap[dateRecordAll.ID] } - dateRecordAll.CommitModelCount = 0 - + dateRecordAll.CloudBrainTaskNum = getMapValue(dateRecordAll.ID, CloudBrainTaskMap) + dateRecordAll.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap) + dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight) + userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndex + if maxUserIndex < dateRecordAll.UserIndex { + maxUserIndex = dateRecordAll.UserIndex + } + if minUserIndex > dateRecordAll.UserIndex { + minUserIndex = dateRecordAll.UserIndex + } dateRecordBatch = 
append(dateRecordBatch, dateRecordAll) if len(dateRecordBatch) >= BATCH_INSERT_SIZE { insertTable(dateRecordBatch, tableName, statictisSess) @@ -508,6 +513,11 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s } dateRecordBatch = make([]UserBusinessAnalysisAll, 0) } + if tableName == "user_business_analysis_all" { + if dateRecordAll.UserIndex > 0 || dateRecordAll.LoginCount > 0 { + userMetrics["TotalHasActivityUser"] = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + 1 + } + } } indexTotal += PAGE_SIZE if indexTotal >= count { @@ -522,14 +532,24 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s } } + //normalization + for k, v := range userIndexMap { + tmpResult := (v - minUserIndex) / (maxUserIndex - minUserIndex) + updateUserIndex(tableName, statictisSess, k, tmpResult) + } log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount)) } +func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) { + updateSql := "UPDATE public." + tableName + " set user_index=" + fmt.Sprint(userIndex) + " where id=" + fmt.Sprint(userId) + statictisSess.Exec(updateSql) +} + func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, statictisSess *xorm.Session) { insertBatchSql := "INSERT INTO public." 
+ tableName + "(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " + - "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date) " + + "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location) " + "VALUES" for i, record := range dateRecords { @@ -537,7 +557,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static ", " + fmt.Sprint(record.IssueCount) + ", " + fmt.Sprint(record.CommentCount) + ", " + fmt.Sprint(record.FocusRepoCount) + ", " + fmt.Sprint(record.StarRepoCount) + ", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) + ", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) + - ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "')" + ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + 
fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "')" if i < (len(dateRecords) - 1) { insertBatchSql += "," } @@ -546,36 +566,36 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static statictisSess.Exec(insertBatchSql) } -func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats) { +func RefreshUserStaticAllTabel(wikiCountMap map[string]int, userMetrics map[string]int) { currentTimeNow := time.Now() pageStartTime := time.Date(2021, 11, 5, 0, 0, 0, 0, currentTimeNow.Location()) pageEndTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_all", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_all", pageStartTime, pageEndTime, userMetrics) log.Info("refresh all data finished.") pageStartTime = time.Date(currentTimeNow.Year(), 1, 1, 0, 0, 0, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_year", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_year", pageStartTime, pageEndTime, userMetrics) thisMonth := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 0, 0, 0, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_month", thisMonth, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_month", thisMonth, pageEndTime, userMetrics) offset := int(time.Monday - currentTimeNow.Weekday()) if offset > 0 { offset = -6 } pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, 
time.Local).AddDate(0, 0, offset) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_week", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_week", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -30) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -1) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime, userMetrics) pageStartTime = thisMonth.AddDate(0, -1, 0) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last_month", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last_month", pageStartTime, pageEndTime, userMetrics) } @@ -613,12 +633,13 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } else { log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) } - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, 
end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -628,6 +649,9 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, log.Info("query user error. return.") return err } + + ParaWeight := getParaWeight() + userMetrics := make(map[string]int) var indexTotal int64 indexTotal = 0 for { @@ -648,47 +672,14 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.Name = userRecord.Name dateRecord.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecord.DataDate = DataDate - if _, ok := CodeMergeCountMap[dateRecord.ID]; !ok { - dateRecord.CodeMergeCount = 0 - } else { - dateRecord.CodeMergeCount = CodeMergeCountMap[dateRecord.ID] - } - - if _, ok := CommitCountMap[dateRecord.ID]; !ok { - dateRecord.CommitCount = 0 - } else { - dateRecord.CommitCount = CommitCountMap[dateRecord.ID] - } - - if _, ok := IssueCountMap[dateRecord.ID]; !ok { - dateRecord.IssueCount = 0 - } else { - dateRecord.IssueCount = IssueCountMap[dateRecord.ID] - } - - if _, ok := CommentCountMap[dateRecord.ID]; !ok { - dateRecord.CommentCount = 0 - } else { - dateRecord.CommentCount = CommentCountMap[dateRecord.ID] - } - if _, ok := FocusRepoCountMap[dateRecord.ID]; !ok { - dateRecord.FocusRepoCount = 0 - } else { - dateRecord.FocusRepoCount = FocusRepoCountMap[dateRecord.ID] - } - - if _, ok := StarRepoCountMap[dateRecord.ID]; !ok { - dateRecord.StarRepoCount = 0 - } else { - dateRecord.StarRepoCount = StarRepoCountMap[dateRecord.ID] - } - - if _, ok := WatchedCountMap[dateRecord.ID]; !ok { - dateRecord.WatchedCount = 0 - } else { - dateRecord.WatchedCount 
= WatchedCountMap[dateRecord.ID] - } + dateRecord.CodeMergeCount = getMapValue(dateRecord.ID, CodeMergeCountMap) + dateRecord.CommitCount = getMapValue(dateRecord.ID, CommitCountMap) + dateRecord.IssueCount = getMapValue(dateRecord.ID, IssueCountMap) + dateRecord.CommentCount = getMapValue(dateRecord.ID, CommentCountMap) + dateRecord.FocusRepoCount = getMapValue(dateRecord.ID, FocusRepoCountMap) + dateRecord.StarRepoCount = getMapValue(dateRecord.ID, StarRepoCountMap) + dateRecord.WatchedCount = getMapValue(dateRecord.ID, WatchedCountMap) if _, ok := CommitCodeSizeMap[dateRecord.Email]; !ok { dateRecord.CommitCodeSize = 0 @@ -696,35 +687,15 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.CommitCodeSize = int(CommitCodeSizeMap[dateRecord.Email].CommitLines) } - if _, ok := CommitDatasetSizeMap[dateRecord.ID]; !ok { - dateRecord.CommitDatasetSize = 0 - } else { - dateRecord.CommitDatasetSize = CommitDatasetSizeMap[dateRecord.ID] - } - - if _, ok := SolveIssueCountMap[dateRecord.ID]; !ok { - dateRecord.SolveIssueCount = 0 - } else { - dateRecord.SolveIssueCount = SolveIssueCountMap[dateRecord.ID] - } + dateRecord.CommitDatasetSize = getMapValue(dateRecord.ID, CommitDatasetSizeMap) + dateRecord.CommitDatasetNum = getMapValue(dateRecord.ID, CommitDatasetNumMap) + dateRecord.SolveIssueCount = getMapValue(dateRecord.ID, SolveIssueCountMap) - if _, ok := wikiCountMap[dateRecord.Name]; !ok { - dateRecord.EncyclopediasCount = 0 - } else { - dateRecord.EncyclopediasCount = wikiCountMap[dateRecord.Name] - } + dateRecord.EncyclopediasCount = getMapKeyStringValue(dateRecord.Name, wikiCountMap) - if _, ok := CreateRepoCountMap[dateRecord.ID]; !ok { - dateRecord.CreateRepoCount = 0 - } else { - dateRecord.CreateRepoCount = CreateRepoCountMap[dateRecord.ID] - } + dateRecord.CreateRepoCount = getMapValue(dateRecord.ID, CreateRepoCountMap) - if _, ok := LoginCountMap[dateRecord.ID]; !ok { - dateRecord.LoginCount = 0 - } else { - 
dateRecord.LoginCount = LoginCountMap[dateRecord.ID] - } + dateRecord.LoginCount = getMapValue(dateRecord.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecord.ID]; !ok { dateRecord.OpenIIndex = 0 @@ -732,8 +703,17 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.OpenIIndex = OpenIIndexMap[dateRecord.ID] } - dateRecord.CommitModelCount = 0 - + dateRecord.CloudBrainTaskNum = getMapValue(dateRecord.ID, CloudBrainTaskMap) + dateRecord.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecord.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecord.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecord.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecord.CloudBrainRunTime = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_CloudBrainRunTime", CloudBrainTaskItemMap) + dateRecord.CommitModelCount = getMapValue(dateRecord.ID, AiModelManageMap) + dateRecord.UserIndex = getUserIndex(dateRecord, ParaWeight) + setUserMetrics(userMetrics, userRecord, start_unix, end_unix, dateRecord) _, err = statictisSess.Insert(&dateRecord) if err != nil { log.Info("insert daterecord failed." 
+ err.Error()) @@ -747,11 +727,142 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } } - RefreshUserStaticAllTabel(wikiCountMap, CommitCodeSizeMap) + RefreshUserStaticAllTabel(wikiCountMap, userMetrics) + + //insert userMetrics table + var useMetrics UserMetrics + useMetrics.CountDate = CountDate.Unix() + statictisSess.Delete(&useMetrics) + + useMetrics.ActivateRegistUser = getMapKeyStringValue("ActivateRegistUser", userMetrics) + useMetrics.HasActivityUser = getMapKeyStringValue("HasActivityUser", userMetrics) + useMetrics.NotActivateRegistUser = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + useMetrics.TotalActivateRegistUser = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + useMetrics.TotalHasActivityUser = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + statictisSess.Insert(&useMetrics) return nil } +func setUserMetrics(userMetrics map[string]int, user *User, start_time int64, end_time int64, dateRecord UserBusinessAnalysis) { + //ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalHasActivityUser + regist_time := user.CreatedUnix.AsTime().Unix() + if regist_time >= start_time && regist_time <= end_time { + if user.IsActive { + userMetrics["ActivateRegistUser"] = getMapKeyStringValue("ActivateRegistUser", userMetrics) + 1 + } else { + userMetrics["NotActivateRegistUser"] = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + 1 + } + } + if user.IsActive { + userMetrics["TotalActivateRegistUser"] = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + 1 + } + + if dateRecord.UserIndex > 0 || dateRecord.LoginCount > 0 { + userMetrics["HasActivityUser"] = getMapKeyStringValue("HasActivityUser", userMetrics) + 1 + } + +} + +func getParaWeight() map[string]float64 { + result := 
make(map[string]float64) + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + statictisSess.Select("*").Table(new(UserAnalysisPara)) + paraList := make([]*UserAnalysisPara, 0) + statictisSess.Find(¶List) + for _, paraRecord := range paraList { + result[paraRecord.Key] = paraRecord.Value + } + return result +} + +func getUserIndexFromAnalysisAll(dateRecord UserBusinessAnalysisAll, ParaWeight map[string]float64) float64 { + var result float64 + // PR数 0.20 + // commit数 0.20 + // 提出任务数 0.20 + // 评论数 0.20 + // 关注项目数 0.10 + // 点赞项目数 0.10 + // 登录次数 0.10 + result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2) + result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2) + log.Info("1 result=" + fmt.Sprint(result)) + result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2) + result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2) + result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1) + result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3) + result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1) + result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2) + result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1) + result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05) + result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3) + result += float64(dateRecord.CommitModelCount) * 
getParaWeightValue("CommitModelCount", ParaWeight, 0.2) + result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1) + + return result +} + +func getUserIndex(dateRecord UserBusinessAnalysis, ParaWeight map[string]float64) float64 { + var result float64 + // PR数 0.20 + // commit数 0.20 + // 提出任务数 0.20 + // 评论数 0.20 + // 关注项目数 0.10 + // 点赞项目数 0.10 + // 登录次数 0.10 + result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2) + result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2) + log.Info("2 result=" + fmt.Sprint(result)) + result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2) + result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2) + result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1) + result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3) + result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1) + result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2) + result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1) + result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05) + result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3) + result += float64(dateRecord.CommitModelCount) * getParaWeightValue("CommitModelCount", ParaWeight, 0.2) + result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1) + + return result +} + +func 
getParaWeightValue(key string, valueMap map[string]float64, defaultValue float64) float64 { + if _, ok := valueMap[key]; !ok { + return defaultValue + } else { + return valueMap[key] + } +} + +func getMapKeyStringValue(key string, valueMap map[string]int) int { + if _, ok := valueMap[key]; !ok { + return 0 + } else { + return valueMap[key] + } +} + +func getMapValue(userId int64, valueMap map[int64]int) int { + if _, ok := valueMap[userId]; !ok { + return 0 + } else { + return valueMap[userId] + } +} + func getInt(str string) int { re, err := strconv.ParseInt(str, 10, 32) if err != nil { @@ -1052,16 +1163,17 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { return resultMap } -func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { +func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() - resultMap := make(map[int64]int) + resultSizeMap := make(map[int64]int) + resultNumMap := make(map[int64]int) cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Attachment)) if err != nil { log.Info("query attachment error. 
return.") - return resultMap + return resultSizeMap, resultNumMap } var indexTotal int64 indexTotal = 0 @@ -1072,10 +1184,12 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { log.Info("query Attachment size=" + fmt.Sprint(len(attachmentList))) for _, attachRecord := range attachmentList { - if _, ok := resultMap[attachRecord.UploaderID]; !ok { - resultMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + if _, ok := resultSizeMap[attachRecord.UploaderID]; !ok { + resultSizeMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] = 1 } else { - resultMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultSizeMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] += 1 } } @@ -1085,7 +1199,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { } } - return resultMap + return resultSizeMap, resultNumMap } func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { @@ -1212,6 +1326,133 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { return resultMap } +func queryCommitCodeSize(start_unix int64, end_unix int64) map[int64]int { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + resultMap := make(map[int64]int) + cond := "count_date>=" + fmt.Sprint(start_unix) + " and count_date<=" + fmt.Sprint(end_unix) + count, err := statictisSess.Where(cond).Count(new(UserBusinessAnalysis)) + if err != nil { + log.Info("query commit code size error. 
return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + statictisSess.Select("id,commit_code_size").Table("user_business_analysis").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + userBusinessAnalysisList := make([]*UserBusinessAnalysis, 0) + statictisSess.Find(&userBusinessAnalysisList) + log.Info("query user login size=" + fmt.Sprint(len(userBusinessAnalysisList))) + for _, analysisRecord := range userBusinessAnalysisList { + if _, ok := resultMap[analysisRecord.ID]; !ok { + resultMap[analysisRecord.ID] = analysisRecord.CommitCodeSize + } else { + resultMap[analysisRecord.ID] += analysisRecord.CommitCodeSize + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + log.Info("user commit code size=" + fmt.Sprint(len(resultMap))) + return resultMap +} + +func queryUserModel(start_unix int64, end_unix int64) map[int64]int { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(AiModelManage)) + if err != nil { + log.Info("query AiModelManage error. 
return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,user_id").Table("ai_model_manage").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + aiModelList := make([]*AiModelManage, 0) + sess.Find(&aiModelList) + log.Info("query AiModelManage size=" + fmt.Sprint(len(aiModelList))) + for _, aiModelRecord := range aiModelList { + if _, ok := resultMap[aiModelRecord.UserId]; !ok { + resultMap[aiModelRecord.UserId] = 1 + } else { + resultMap[aiModelRecord.UserId] += 1 + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + return resultMap +} + +func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[string]int) { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + resultItemMap := make(map[string]int) + + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(Cloudbrain)) + if err != nil { + log.Info("query cloudbrain error. 
return.") + return resultMap, resultItemMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,job_type,user_id,duration,train_job_duration,type").Table("cloudbrain").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + cloudTaskList := make([]*Cloudbrain, 0) + sess.Find(&cloudTaskList) + log.Info("query cloudbrain size=" + fmt.Sprint(len(cloudTaskList))) + for _, cloudTaskRecord := range cloudTaskList { + if _, ok := resultMap[cloudTaskRecord.UserID]; !ok { + resultMap[cloudTaskRecord.UserID] = 1 + } else { + resultMap[cloudTaskRecord.UserID] += 1 + } + setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap) + if cloudTaskRecord.Type == 1 { //npu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "INFERENCE" { + setMapKey("NpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } else { //type=0 gpu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "BENCHMARK" { + setMapKey("GpuBenchMarkJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + + return resultMap, resultItemMap +} +func setMapKey(key string, userId int64, value int, resultItemMap map[string]int) { + newKey := fmt.Sprint(userId) + "_" + key + if _, ok := resultItemMap[newKey]; !ok { + resultItemMap[newKey] = value + } else { + resultItemMap[newKey] += value + } +} + func subMonth(t1, t2 time.Time) (month int) { y1 := t1.Year() y2 := t2.Year() diff --git a/models/user_business_struct.go b/models/user_business_struct.go index c435c0b07..17d9f046f 100644 --- a/models/user_business_struct.go 
+++ b/models/user_business_struct.go @@ -44,6 +44,18 @@ type UserBusinessAnalysisCurrentYear struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLast30Day struct { @@ -88,6 +100,18 @@ type UserBusinessAnalysisLast30Day struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLastMonth struct { @@ -132,6 +156,18 @@ type UserBusinessAnalysisLastMonth struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT 
NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentMonth struct { @@ -176,6 +212,18 @@ type UserBusinessAnalysisCurrentMonth struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentWeek struct { @@ -220,6 +268,18 @@ type UserBusinessAnalysisCurrentWeek struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisYesterday struct { @@ -264,4 +324,30 @@ type UserBusinessAnalysisYesterday struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + 
GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` +} + +type UserAnalysisPara struct { + Key string `xorm:"NOT NULL"` + Value float64 `xorm:"NOT NULL DEFAULT 0"` +} + +type UserMetrics struct { + CountDate int64 `xorm:"pk"` + ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + TotalHasActivityUser int `xorm:"NOT NULL DEFAULT 0"` } diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go index 4a89f9393..dab2941d0 100755 --- a/modules/cloudbrain/cloudbrain.go +++ b/modules/cloudbrain/cloudbrain.go @@ -1,6 +1,7 @@ package cloudbrain import ( + "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" "strconv" @@ -194,6 +195,7 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, datasetName = attach.Name } + createTime := timeutil.TimeStampNow() jobResult, err := CreateJob(jobName, models.CreateJobParams{ JobName: jobName, RetryCount: 1, @@ -294,6 +296,8 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, BootFile: bootFile, DatasetName: datasetName, Parameters: params, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { @@ -341,6 +345,7 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e return errors.New("no such resourceSpec") } + createTime := timeutil.TimeStampNow() jobResult, err := CreateJob(jobName, models.CreateJobParams{ JobName: jobName, RetryCount: 1, @@ -432,6 +437,8 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e GpuQueue: task.GpuQueue, ResourceSpecId: task.ResourceSpecId, ComputeResource: task.ComputeResource, + CreatedUnix: 
createTime, + UpdatedUnix: createTime, } err = models.RestartCloudbrain(task, newTask) diff --git a/modules/cron/tasks_basic.go b/modules/cron/tasks_basic.go index b9838e66f..b3a6c02a1 100755 --- a/modules/cron/tasks_basic.go +++ b/modules/cron/tasks_basic.go @@ -185,6 +185,17 @@ func registerHandleSummaryStatistic() { }) } +func registerHandleOrgStatistic() { + RegisterTaskFatal("handle_org_statistic", &BaseConfig{ + Enabled: true, + RunAtStart: false, + Schedule: "0 0 2 * * ?", + }, func(ctx context.Context, _ *models.User, _ Config) error { + models.UpdateOrgStatistics() + return nil + }) +} + func registerSyncCloudbrainStatus() { RegisterTaskFatal("sync_cloudbrain_status", &BaseConfig{ Enabled: true, @@ -215,4 +226,5 @@ func initBasicTasks() { registerHandleSummaryStatistic() registerSyncCloudbrainStatus() + registerHandleOrgStatistic() } diff --git a/modules/labelmsg/redismsgsender.go b/modules/labelmsg/redismsgsender.go index 8b2eae772..c06407588 100644 --- a/modules/labelmsg/redismsgsender.go +++ b/modules/labelmsg/redismsgsender.go @@ -50,6 +50,7 @@ func SendDecompressAttachToLabelOBS(attach string) error { _, err := redisclient.Do("Publish", setting.DecompressOBSTaskName, attach) if err != nil { log.Critical("redis Publish failed.") + return err } log.Info("LabelDecompressOBSQueue(%s) success", attach) diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go index 538fcfbd9..78b40fd56 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -1,6 +1,7 @@ package modelarts import ( + "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" "fmt" @@ -197,6 +198,7 @@ func GenerateTask(ctx *context.Context, jobName, uuid, description, flavor strin if poolInfos == nil { json.Unmarshal([]byte(setting.PoolInfos), &poolInfos) } + createTime := timeutil.TimeStampNow() jobResult, err := CreateJob(models.CreateNotebookParams{ JobName: jobName, Description: description, @@ -235,6 +237,8 @@ func GenerateTask(ctx 
*context.Context, jobName, uuid, description, flavor strin Type: models.TypeCloudBrainTwo, Uuid: uuid, ComputeResource: models.NPUResource, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { @@ -254,7 +258,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc log.Error("GetNotebookImageName failed: %v", err.Error()) return err } - + createTime := timeutil.TimeStampNow() jobResult, err := createNotebook2(models.CreateNotebook2Params{ JobName: jobName, Description: description, @@ -280,6 +284,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc RepoID: ctx.Repo.Repository.ID, JobID: jobResult.ID, JobName: jobName, + FlavorCode: flavor, DisplayJobName: displayJobName, JobType: string(models.JobTypeDebug), Type: models.TypeCloudBrainTwo, @@ -287,6 +292,8 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc ComputeResource: models.NPUResource, Image: imageName, Description: description, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { @@ -303,6 +310,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc } func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error) { + createTime := timeutil.TimeStampNow() jobResult, err := createTrainJob(models.CreateTrainJobParams{ JobName: req.JobName, Description: req.Description, @@ -363,6 +371,8 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error EngineName: req.EngineName, VersionCount: req.VersionCount, TotalVersionCount: req.TotalVersionCount, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { @@ -374,6 +384,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error } func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, jobId string) (err error) { + createTime := timeutil.TimeStampNow() jobResult, err := 
createTrainJobVersion(models.CreateTrainJobVersionParams{ Description: req.Description, Config: models.TrainJobVersionConfig{ @@ -450,6 +461,8 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job EngineName: req.EngineName, TotalVersionCount: VersionTaskList[0].TotalVersionCount + 1, VersionCount: VersionListCount + 1, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { log.Error("CreateCloudbrain(%s) failed:%v", req.JobName, err.Error()) @@ -525,6 +538,7 @@ func GetOutputPathByCount(TotalVersionCount int) (VersionOutputPath string) { } func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (err error) { + createTime := timeutil.TimeStampNow() jobResult, err := createInferenceJob(models.CreateInferenceJobParams{ JobName: req.JobName, Description: req.Description, @@ -590,6 +604,8 @@ func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (e ModelVersion: req.ModelVersion, CkptName: req.CkptName, ResultUrl: req.ResultUrl, + CreatedUnix: createTime, + UpdatedUnix: createTime, }) if err != nil { diff --git a/modules/setting/repository.go b/modules/setting/repository.go index 8af3eaaf4..dceb48f16 100644 --- a/modules/setting/repository.go +++ b/modules/setting/repository.go @@ -40,6 +40,7 @@ var ( DisabledRepoUnits []string DefaultRepoUnits []string PrefixArchiveFiles bool + RepoMaxSize int64 // Repository editor settings Editor struct { @@ -54,6 +55,7 @@ var ( AllowedTypes []string `delim:"|"` FileMaxSize int64 MaxFiles int + TotalMaxSize int64 } `ini:"-"` // Repository local settings @@ -104,6 +106,7 @@ var ( DisabledRepoUnits: []string{}, DefaultRepoUnits: []string{}, PrefixArchiveFiles: true, + RepoMaxSize: 1024, // Repository editor settings Editor: struct { @@ -121,12 +124,14 @@ var ( AllowedTypes []string `delim:"|"` FileMaxSize int64 MaxFiles int + TotalMaxSize int64 }{ Enabled: true, TempPath: "data/tmp/uploads", AllowedTypes: []string{}, - FileMaxSize: 3, - 
MaxFiles: 5, + FileMaxSize: 30, + MaxFiles: 10, + TotalMaxSize: 1024, }, // Repository local settings diff --git a/modules/setting/setting.go b/modules/setting/setting.go index 26f068193..eee539d0c 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -438,6 +438,7 @@ var ( //home page RecommentRepoAddr string ESSearchURL string + INDEXPOSTFIX string //notice config UserNameOfNoticeRepo string RepoNameOfNoticeRepo string @@ -1268,6 +1269,7 @@ func NewContext() { sec = Cfg.Section("homepage") RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/") ESSearchURL = sec.Key("ESSearchURL").MustString("http://192.168.207.94:9200") + INDEXPOSTFIX = sec.Key("INDEXPOSTFIX").MustString("") sec = Cfg.Section("notice") UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG") diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 8e6b4201b..08a354359 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -30,6 +30,8 @@ type FileInfo struct { } type FileInfoList []FileInfo +const MAX_LIST_PARTS = 1000 + func (ulist FileInfoList) Swap(i, j int) { ulist[i], ulist[j] = ulist[j], ulist[i] } func (ulist FileInfoList) Len() int { return len(ulist) } func (ulist FileInfoList) Less(i, j int) bool { @@ -97,29 +99,48 @@ func CompleteObsMultiPartUpload(uuid, uploadID, fileName string) error { input.Bucket = setting.Bucket input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") input.UploadId = uploadID - output, err := ObsCli.ListParts(&obs.ListPartsInput{ - Bucket: setting.Bucket, - Key: input.Key, - UploadId: uploadID, - }) - if err != nil { - log.Error("ListParts failed:", err.Error()) - return err - } - for _, partInfo := range output.Parts { - input.Parts = append(input.Parts, obs.Part{ - PartNumber: partInfo.PartNumber, - ETag: partInfo.ETag, + partNumberMarker := 0 + for { + output, err := 
ObsCli.ListParts(&obs.ListPartsInput{ + Bucket: setting.Bucket, + Key: input.Key, + UploadId: uploadID, + MaxParts: MAX_LIST_PARTS, + PartNumberMarker: partNumberMarker, }) + if err != nil { + log.Error("ListParts failed:", err.Error()) + return err + } + + partNumberMarker = output.NextPartNumberMarker + log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, output.MaxParts, output.PartNumberMarker, output.NextPartNumberMarker, len(output.Parts)) + + for _, partInfo := range output.Parts { + input.Parts = append(input.Parts, obs.Part{ + PartNumber: partInfo.PartNumber, + ETag: partInfo.ETag, + }) + } + + if len(output.Parts) < output.MaxParts { + break + } else { + continue + } + + break } - _, err = ObsCli.CompleteMultipartUpload(input) + output, err := ObsCli.CompleteMultipartUpload(input) if err != nil { log.Error("CompleteMultipartUpload failed:", err.Error()) return err } + log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId) + return nil } @@ -480,7 +501,7 @@ func GetObsCreateSignedUrlByBucketAndKey(bucket, key string) (string, error) { filename = key[comma+1:] } reqParams := make(map[string]string) - filename = url.QueryEscape(filename) + filename = url.PathEscape(filename) reqParams["response-content-disposition"] = "attachment; filename=\"" + filename + "\"" input.QueryParams = reqParams output, err := ObsCli.CreateSignedUrl(input) @@ -503,6 +524,7 @@ func ObsGetPreSignedUrl(uuid, fileName string) (string, error) { input.Bucket = setting.Bucket input.Expires = 60 * 60 + fileName = url.PathEscape(fileName) reqParams := make(map[string]string) reqParams["response-content-disposition"] = "attachment; filename=\"" + fileName + "\"" input.QueryParams = reqParams diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index f06c32f2f..7c0f7609b 100755 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -502,6 +502,11 @@ static.encyclopediascount=Encyclopedias Count 
static.createrepocount=Create Repo Count static.openiindex=OpenI Index static.registdate=Regist Date +static.CloudBrainTaskNum=CloudBrain Task Count +static.CloudBrainRunTime=CloudBrain Run Time +static.CommitDatasetNum=Commit Dataset Count +static.CommitModelCount=Commit Model Count +static.UserIndex=User Index static.countdate=Count Date static.all=All static.public.user_business_analysis_current_month=Current_Month @@ -920,7 +925,13 @@ language_other = Other datasets = Datasets datasets.desc = Enable Dataset cloudbrain_helper=Use GPU/NPU resources to open notebooks, model training tasks, etc. - +cloudbrain.exitinfo=Exit Information +cloudbrain.platform=Platform +cloudbrain.endtime=End Time +cloudbrain.runinfo=Task Runtime Information +cloudbrain.time.starttime=Start run time +cloudbrain.time.endtime=End run time +cloudbrain.datasetdownload=Dataset download url model_manager = Model model_noright=No right model_rename=Duplicate model name, please modify model name. @@ -1258,6 +1269,10 @@ editor.cannot_commit_to_protected_branch = Cannot commit to protected branch '%s editor.no_commit_to_branch = Unable to commit directly to branch because: editor.user_no_push_to_branch = User cannot push to branch editor.require_signed_commit = Branch requires a signed commit +editor.repo_too_large = Repository can not exceed %d MB +editor.repo_file_invalid = Upload files are invalid +editor.upload_file_too_much = Can not upload more than %d files at a time + commits.desc = Browse source code change history. commits.commits = Commits @@ -2200,6 +2215,16 @@ customize = Customize selected_project=Selected Projects fold = Fold unfold = Unfold +org_member = Member +org_members = Members +org_team = Team +org_teams = Teams +org_repository = Repository +org_repositories = Repositories + +star = Star Top10 +member = Members Top10 +active = Active Top10 form.name_reserved = The organization name '%s' is reserved. 
form.name_pattern_not_allowed = The pattern '%s' is not allowed in an organization name. @@ -2855,6 +2880,8 @@ uploading = Uploading upload_complete = Uploading complete failed = Upload Failed enable_minio_support = Enable minio support to use the dataset service +max_file_tooltips= Upload a maximum of ? files at a time, each file does not exceed ? MB. +max_size_tooltips= You can only upload a maximum of ? files at a time. The upload limit has been reached, please do not add more files. [notification] notifications = Notifications diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index b2696c570..e2d69570e 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -507,6 +507,11 @@ static.encyclopediascount=百科页面贡献次数 static.createrepocount=创建项目数 static.openiindex=OpenI指数 static.registdate=用户注册时间 +static.CloudBrainTaskNum=云脑任务数 +static.CloudBrainRunTime=云脑运行时间(小时) +static.CommitDatasetNum=上传(提交)数据集文件数 +static.CommitModelCount=提交模型数 +static.UserIndex=用户指数 static.countdate=系统统计时间 static.all=所有 static.public.user_business_analysis_current_month=本月 @@ -969,7 +974,13 @@ cloudbrain_jobname_err=只能以小写字母或数字开头且只包含小写字 cloudbrain_query_fail=查询云脑任务失败。 cloudbrain.mirror_tag = 镜像标签 cloudbrain.mirror_description = 镜像描述 - +cloudbrain.exitinfo=退出信息 +cloudbrain.platform=平台 +cloudbrain.endtime=结束时间 +cloudbrain.runinfo=任务运行简况 +cloudbrain.time.starttime=开始运行时间 +cloudbrain.time.endtime=结束运行时间 +cloudbrain.datasetdownload=数据集下载地址 record_begintime_get_err=无法获取统计开始时间。 parameter_is_wrong=输入参数错误,请检查输入参数。 total_count_get_error=查询总页数失败。 @@ -1015,7 +1026,9 @@ modelarts.train_job.basic_info=基本信息 modelarts.train_job.job_status=任务状态 modelarts.train_job.job_name=任务名称 modelarts.train_job.version=任务版本 -modelarts.train_job.start_time=开始时间 +modelarts.train_job.start_time=开始运行时间 +modelarts.train_job.end_time=运行结束时间 +modelarts.train_job.wait_time=等待时间 modelarts.train_job.dura_time=运行时长 modelarts.train_job.description=任务描述 
modelarts.train_job.parameter_setting=参数设置 @@ -1267,6 +1280,9 @@ editor.cannot_commit_to_protected_branch=不可以提交到受保护的分支 '% editor.no_commit_to_branch=无法直接提交分支,因为: editor.user_no_push_to_branch=用户不能推送到分支 editor.require_signed_commit=分支需要签名提交 +editor.repo_too_large = 代码仓总大小不能超过%dMB +editor.repo_file_invalid = 提交的文件非法 +editor.upload_file_too_much = 不能同时提交超过%d个文件 commits.desc=浏览代码修改历史 commits.commits=次代码提交 @@ -2207,6 +2223,16 @@ customize = 自定义 selected_project=精选项目 fold = 收起 unfold = 展开 +org_member = 成员 +org_members = 成员 +org_team = 团队 +org_teams = 团队 +org_repository = 项目 +org_repositories = 项目 + +star = 点赞榜 +member = 成员榜 +active = 活跃榜 form.name_reserved=组织名称 '%s' 是被保留的。 form.name_pattern_not_allowed=组织名称中不允许使用 "%s"。 @@ -2863,6 +2889,8 @@ uploading=正在上传 upload_complete=上传完成 failed=上传失败 enable_minio_support=启用minio支持以使用数据集服务 +max_file_tooltips=单次最多上传?个文件,每个文件不超过? MB。 +max_size_tooltips=一次最多只能上传?个文件, 上传已达到上限,请勿再添加文件。 [notification] notifications=通知 diff --git a/public/home/search.js b/public/home/search.js index 70b5d4ef9..e23d27549 100644 --- a/public/home/search.js +++ b/public/home/search.js @@ -108,8 +108,9 @@ function searchItem(type,sortType){ currentSearchSortBy = sortBy[sortType]; currentSearchAscending = sortAscending[sortType]; OnlySearchLabel =false; - page(currentPage); + }else{ + emptySearch(); } } @@ -121,49 +122,31 @@ function search(){ if(!isEmpty(currentSearchKeyword)){ currentSearchKeyword = currentSearchKeyword.trim(); } - $('#searchForm').addClass("hiddenSearch"); - initPageInfo(); if(!isEmpty(currentSearchKeyword)){ - document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); - currentSearchSortBy = sortBy[10]; - currentSearchAscending = "false"; - OnlySearchLabel =false; - page(currentPage); - if(currentSearchTableName != "repository"){ - doSearch("repository",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "issue"){ - doSearch("issue",currentSearchKeyword,1,pageSize,true,"",false); - } - 
if(currentSearchTableName != "user"){ - doSearch("user",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "org"){ - doSearch("org",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "dataset"){ - doSearch("dataset",currentSearchKeyword,1,pageSize,true,"",false); - } - if(currentSearchTableName != "pr"){ - doSearch("pr",currentSearchKeyword,1,pageSize,true,"",false); - } + doSpcifySearch(currentSearchTableName,currentSearchKeyword,sortBy[10],"false"); }else{ - initDiv(false); - document.getElementById("find_id").innerHTML=getLabel(isZh,"search_empty"); - $('#find_title').html(""); - document.getElementById("sort_type").innerHTML=""; - document.getElementById("child_search_item").innerHTML=""; - document.getElementById("page_menu").innerHTML=""; - $('#repo_total').text(""); - $('#pr_total').text(""); - $('#issue_total').text(""); - $('#dataset_total').text(""); - $('#user_total').text(""); - $('#org_total').text(""); - setActivate(null); + emptySearch(); } } +function emptySearch(){ + initDiv(false); + initPageInfo(); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_empty"); + $('#find_title').html(""); + document.getElementById("sort_type").innerHTML=""; + document.getElementById("child_search_item").innerHTML=""; + document.getElementById("page_menu").innerHTML=""; + $('#repo_total').text(""); + $('#pr_total').text(""); + $('#issue_total').text(""); + $('#dataset_total').text(""); + $('#user_total').text(""); + $('#org_total').text(""); + setActivate(null); +} + function initDiv(isSearchLabel=false){ if(isSearchLabel){ document.getElementById("search_div").style.display="none"; @@ -174,7 +157,6 @@ function initDiv(isSearchLabel=false){ document.getElementById("user_item").style.display="none"; document.getElementById("org_item").style.display="none"; document.getElementById("find_id").innerHTML=""; - }else{ 
document.getElementById("search_div").style.display="block"; document.getElementById("search_label_div").style.display="none"; @@ -187,6 +169,39 @@ function initDiv(isSearchLabel=false){ } } +function doSpcifySearch(tableName,keyword,sortBy="",ascending="false"){ + initDiv(false); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); + currentSearchKeyword = keyword; + initPageInfo(); + currentSearchTableName = tableName; + currentSearchSortBy = sortBy; + currentSearchAscending = ascending; + OnlySearchLabel =false; + + page(currentPage); + + if(currentSearchTableName != "repository"){ + doSearch("repository",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "issue"){ + doSearch("issue",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "user"){ + doSearch("user",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "org"){ + doSearch("org",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "dataset"){ + doSearch("dataset",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "pr"){ + doSearch("pr",currentSearchKeyword,1,pageSize,true,"",false); + } +} + function doSearchLabel(tableName,keyword,sortBy="",ascending="false"){ initDiv(true); //document.getElementById("search_div").style.display="none"; @@ -1272,8 +1287,17 @@ var zhCN={ sessionStorage.removeItem("searchLabel"); doSearchLabel(sessionStorage.getItem("tableName"),sessionStorage.getItem("keyword"),sessionStorage.getItem("sortBy"),sessionStorage.getItem("ascending")); }else{ - console.log("normal search...."); - search(); + var specifySearch = sessionStorage.getItem("specifySearch"); + if(specifySearch){ + sessionStorage.removeItem("specifySearch"); + console.log("search sepcial keyword=...." 
+ sessionStorage.getItem("keyword")); + document.getElementById("keyword_input").value = sessionStorage.getItem("keyword"); + doSpcifySearch(sessionStorage.getItem("tableName"),sessionStorage.getItem("keyword"),sessionStorage.getItem("sortBy"),sessionStorage.getItem("ascending")); + }else{ + console.log("normal search...."); + search(); + } + } } } diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 1868edcb5..6e5d1d370 100755 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -547,6 +547,8 @@ func RegisterRoutes(m *macaron.Macaron) { }) }, operationReq) + m.Get("/query_user_metrics", operationReq, repo_ext.QueryMetrics) + m.Get("/query_user_rank_list", operationReq, repo_ext.QueryRankingList) m.Get("/query_user_static_page", operationReq, repo_ext.QueryUserStaticDataPage) m.Get("/query_user_current_month", operationReq, repo_ext.QueryUserStaticCurrentMonth) m.Get("/query_user_current_week", operationReq, repo_ext.QueryUserStaticCurrentWeek) @@ -555,6 +557,10 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/query_user_last_month", operationReq, repo_ext.QueryUserStaticLastMonth) m.Get("/query_user_yesterday", operationReq, repo_ext.QueryUserStaticYesterday) m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll) + //cloudbrain board + m.Group("/cloudbrainboard", func() { + m.Get("/downloadAll", repo.DownloadCloudBrainBoard) + }, operationReq) // Users m.Group("/users", func() { m.Get("/search", user.Search) diff --git a/routers/api/v1/repo/cloudbrain_dashboard.go b/routers/api/v1/repo/cloudbrain_dashboard.go new file mode 100644 index 000000000..b979729a8 --- /dev/null +++ b/routers/api/v1/repo/cloudbrain_dashboard.go @@ -0,0 +1,135 @@ +package repo + +import ( + "net/http" + "net/url" + "time" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "github.com/360EntSecGroup-Skylar/excelize/v2" +) + +func DownloadCloudBrainBoard(ctx *context.Context) { + + page := 1 + + 
pageSize := 300 + + var cloudBrain = ctx.Tr("repo.cloudbrain") + fileName := getCloudbrainFileName(cloudBrain) + + _, total, err := models.CloudbrainAll(&models.CloudbrainsOptions{ + ListOptions: models.ListOptions{ + Page: page, + PageSize: 1, + }, + Type: models.TypeCloudBrainAll, + NeedRepoInfo: false, + }) + + if err != nil { + log.Warn("Can not get cloud brain info", err) + ctx.Error(http.StatusBadRequest, ctx.Tr("repo.cloudbrain_query_fail")) + return + } + + totalPage := getTotalPage(total, pageSize) + + f := excelize.NewFile() + + index := f.NewSheet(cloudBrain) + f.DeleteSheet("Sheet1") + + for k, v := range allCloudbrainHeader(ctx) { + f.SetCellValue(cloudBrain, k, v) + } + + var row = 2 + for i := 0; i < totalPage; i++ { + + pageRecords, _, err := models.CloudbrainAll(&models.CloudbrainsOptions{ + ListOptions: models.ListOptions{ + Page: page, + PageSize: pageSize, + }, + Type: models.TypeCloudBrainAll, + NeedRepoInfo: true, + }) + if err != nil { + log.Warn("Can not get cloud brain info", err) + continue + } + for _, record := range pageRecords { + + for k, v := range allCloudbrainValues(row, record, ctx) { + f.SetCellValue(cloudBrain, k, v) + } + row++ + + } + + page++ + } + f.SetActiveSheet(index) + + ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(fileName)) + ctx.Resp.Header().Set("Content-Type", "application/octet-stream") + + f.WriteTo(ctx.Resp) +} +func getCloudbrainFileName(baseName string) string { + return baseName + "_" + time.Now().Format(EXCEL_DATE_FORMAT) + ".xlsx" + +} +func allCloudbrainHeader(ctx *context.Context) map[string]string { + + return map[string]string{"A1": ctx.Tr("repo.cloudbrain_task"), "B1": ctx.Tr("repo.cloudbrain_task_type"), "C1": ctx.Tr("repo.modelarts.status"), + "D1": ctx.Tr("repo.modelarts.createtime"), "E1": ctx.Tr("repo.modelarts.train_job.wait_time"), "F1": ctx.Tr("repo.modelarts.train_job.dura_time"), + "G1": ctx.Tr("repo.modelarts.train_job.start_time"), + "H1": 
ctx.Tr("repo.modelarts.train_job.end_time"), "I1": ctx.Tr("repo.modelarts.computing_resources"), + "J1": ctx.Tr("repo.cloudbrain_creator"), "K1": ctx.Tr("repo.repo_name"), "L1": ctx.Tr("repo.cloudbrain_task_name")} + +} +func allCloudbrainValues(row int, rs *models.CloudbrainInfo, ctx *context.Context) map[string]string { + return map[string]string{getCellName("A", row): rs.DisplayJobName, getCellName("B", row): rs.JobType, getCellName("C", row): rs.Status, + getCellName("D", row): time.Unix(int64(rs.Cloudbrain.CreatedUnix), 0).Format(CREATE_TIME_FORMAT), getCellName("E", row): getBrainWaitTime(rs), + getCellName("F", row): rs.TrainJobDuration, getCellName("G", row): getBrainStartTime(rs), + getCellName("H", row): getBrainEndTime(rs), + getCellName("I", row): rs.ComputeResource, getCellName("J", row): rs.Name, getCellName("K", row): getBrainRepo(rs), + getCellName("L", row): rs.JobName, + } +} +func getBrainRepo(rs *models.CloudbrainInfo) string { + if rs.Repo != nil { + return rs.Repo.OwnerName + "/" + rs.Repo.Alias + } + return "" +} +func getBrainStartTime(rs *models.CloudbrainInfo) string { + timeString := time.Unix(int64(rs.Cloudbrain.StartTime), 0).Format(CREATE_TIME_FORMAT) + if timeString != "1970/01/01 08:00:00" { + return timeString + } else { + return "0" + } + +} +func getBrainEndTime(rs *models.CloudbrainInfo) string { + timeString := time.Unix(int64(rs.Cloudbrain.EndTime), 0).Format(CREATE_TIME_FORMAT) + if timeString != "1970/01/01 08:00:00" { + return timeString + } else { + return "0" + } + +} +func getBrainWaitTime(rs *models.CloudbrainInfo) string { + waitTime := rs.Cloudbrain.StartTime - rs.Cloudbrain.CreatedUnix + if waitTime <= 0 { + return "0" + } else { + return models.ConvertDurationToStr(int64(waitTime)) + } +} diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go index e24ac95fb..9e4edea03 100755 --- a/routers/api/v1/repo/modelarts.go +++ b/routers/api/v1/repo/modelarts.go @@ -74,6 +74,7 @@ func 
GetModelArtsNotebook2(ctx *context.APIContext) { if job.EndTime == 0 && models.IsModelArtsDebugJobTerminal(job.Status) { job.EndTime = timeutil.TimeStampNow() } + job.CorrectCreateUnix() job.ComputeAndSetDuration() err = models.UpdateJob(job) if err != nil { @@ -160,6 +161,7 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) { } if result.JobStatus.State != string(models.JobWaiting) { + models.ParseAndSetDurationFromCloudBrainOne(result, job) err = models.UpdateJob(job) if err != nil { log.Error("UpdateJob failed:", err) @@ -177,14 +179,12 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) { } job.Status = modelarts.TransTrainJobStatus(result.IntStatus) job.Duration = result.Duration / 1000 - job.TrainJobDuration = result.TrainJobDuration - job.TrainJobDuration = models.ConvertDurationToStr(job.Duration) if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 { job.EndTime = job.StartTime.Add(job.Duration) } - + job.CorrectCreateUnix() err = models.UpdateTrainJobVersion(job) if err != nil { log.Error("UpdateJob failed:", err) @@ -417,7 +417,7 @@ func GetModelArtsInferenceJob(ctx *context.APIContext) { if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 { job.EndTime = job.StartTime.Add(job.Duration) } - + job.CorrectCreateUnix() err = models.UpdateInferenceJob(job) if err != nil { log.Error("UpdateJob failed:", err) diff --git a/routers/home.go b/routers/home.go index c33d7a049..324bb1032 100755 --- a/routers/home.go +++ b/routers/home.go @@ -49,7 +49,7 @@ func Home(ctx *context.Context) { ctx.HTML(200, tplHome) } -func setRecommendURL(ctx *context.Context) { +func setRecommendURLOnly(ctx *context.Context) { addr := setting.RecommentRepoAddr[10:] start := strings.Index(addr, "/") end := strings.Index(addr, "raw") @@ -58,7 +58,10 @@ func setRecommendURL(ctx *context.Context) { } else { ctx.Data["RecommendURL"] = setting.RecommentRepoAddr } +} +func setRecommendURL(ctx *context.Context) { + 
setRecommendURLOnly(ctx) ctx.Data["page_title"] = ctx.Tr("home.page_title") ctx.Data["page_small_title"] = ctx.Tr("home.page_small_title") ctx.Data["page_description"] = ctx.Tr("home.page_description") @@ -441,17 +444,39 @@ func ExploreOrganizations(ctx *context.Context) { ctx.Data["PageIsExploreOrganizations"] = true ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled - visibleTypes := []structs.VisibleType{structs.VisibleTypePublic} - if ctx.User != nil { - visibleTypes = append(visibleTypes, structs.VisibleTypeLimited, structs.VisibleTypePrivate) + N := 10 + starInfo, err := models.FindTopNStarsOrgs(N) + if err != nil { + log.Error("GetStarOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetStarOrgInfos", err) + return + } + memberInfo, err := models.FindTopNMembersOrgs(N) + if err != nil { + log.Error("GetMemberOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetMemberOrgInfos", err) + return + } + openIInfo, err := models.FindTopNOpenIOrgs(N) + if err != nil { + log.Error("GetOpenIOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetOpenIOrgInfos", err) + return } - RenderUserSearch(ctx, &models.SearchUserOptions{ - Actor: ctx.User, - Type: models.UserTypeOrganization, - ListOptions: models.ListOptions{PageSize: setting.UI.ExplorePagingNum}, - Visible: visibleTypes, - }, tplExploreOrganizations) + recommendOrgs, err := GetRecommendOrg() + if err != nil { + log.Error("GetRecommendOrgInfos failed:%v", err.Error(), ctx.Data["MsgID"]) + ctx.ServerError("GetRecommendOrgInfos", err) + return + } + setRecommendURLOnly(ctx) + ctx.Data["RecommendOrgs"] = recommendOrgs + ctx.Data["StarOrgs"] = starInfo + ctx.Data["MemberOrgs"] = memberInfo + ctx.Data["ActiveOrgs"] = openIInfo + + ctx.HTML(http.StatusOK, tplExploreOrganizations) } // ExploreCode render explore code page @@ -583,12 +608,12 @@ func NotFound(ctx *context.Context) { ctx.NotFound("home.NotFound", nil) } -func 
RecommendOrgFromPromote(ctx *context.Context) { +func GetRecommendOrg() ([]map[string]interface{}, error) { url := setting.RecommentRepoAddr + "organizations" result, err := repository.RecommendFromPromote(url) + if err != nil { - ctx.ServerError("500", err) - return + return nil, err } resultOrg := make([]map[string]interface{}, 0) for _, userName := range result { @@ -598,6 +623,7 @@ func RecommendOrgFromPromote(ctx *context.Context) { userMap["Name"] = user.Name userMap["Description"] = user.Description userMap["FullName"] = user.FullName + userMap["HomeLink"] = user.HomeLink() userMap["ID"] = user.ID userMap["Avatar"] = user.RelAvatarLink() userMap["NumRepos"] = user.NumRepos @@ -608,7 +634,15 @@ func RecommendOrgFromPromote(ctx *context.Context) { log.Info("query user error," + err.Error()) } } + return resultOrg, nil +} +func RecommendOrgFromPromote(ctx *context.Context) { + resultOrg, err := GetRecommendOrg() + if err != nil { + ctx.ServerError("500", err) + return + } ctx.JSON(200, resultOrg) } diff --git a/routers/private/internal.go b/routers/private/internal.go index d80a706cc..ace25c809 100755 --- a/routers/private/internal.go +++ b/routers/private/internal.go @@ -45,6 +45,8 @@ func RegisterRoutes(m *macaron.Macaron) { m.Post("/manager/flush-queues", bind(private.FlushOptions{}), FlushQueues) m.Post("/tool/update_all_repo_commit_cnt", UpdateAllRepoCommitCnt) m.Post("/tool/repo_stat/:date", RepoStatisticManually) + + m.Get("/tool/org_stat", OrgStatisticManually) m.Post("/tool/update_repo_visit/:date", UpdateRepoVisit) m.Post("/task/history_handle/duration", repo.HandleTaskWithNoDuration) diff --git a/routers/private/tool.go b/routers/private/tool.go index d01c5b2ab..122a41afe 100755 --- a/routers/private/tool.go +++ b/routers/private/tool.go @@ -45,6 +45,10 @@ func RepoStatisticManually(ctx *macaron.Context) { repo.TimingCountDataByDate(date) } +func OrgStatisticManually() { + models.UpdateOrgStatistics() +} + func UpdateRepoVisit(ctx *macaron.Context) { 
date := ctx.Params("date") log.Info("date(%s)", date) diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 96f17b74b..3c66a3537 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -78,7 +78,7 @@ func UploadAttachmentUI(ctx *context.Context) { } func EditAttachmentUI(ctx *context.Context) { - + id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64) ctx.Data["PageIsDataset"] = true attachment, _ := models.GetAttachmentByID(id) @@ -986,23 +986,29 @@ func HandleUnDecompressAttachment() { if attach.Type == models.TypeCloudBrainOne { err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name) if err != nil { - log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error()) + log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error()) } else { - attach.DecompressState = models.DecompressStateIng - err = models.UpdateAttachment(attach) - if err != nil { - log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error()) - } + updateAttachmentDecompressStateIng(attach) } } else if attach.Type == models.TypeCloudBrainTwo { attachjson, _ := json.Marshal(attach) - labelmsg.SendDecompressAttachToLabelOBS(string(attachjson)) + err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson)) + if err != nil { + log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error()) + } else { + updateAttachmentDecompressStateIng(attach) + } } - } - return } +func updateAttachmentDecompressStateIng(attach *models.Attachment) { + attach.DecompressState = models.DecompressStateIng + err := models.UpdateAttachment(attach) + if err != nil { + log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error()) + } +} func QueryAllPublicDataset(ctx *context.Context) { attachs, err := models.GetAllPublicAttachments() diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index 898f3844f..5752466d8 100755 --- a/routers/repo/cloudbrain.go +++ 
b/routers/repo/cloudbrain.go @@ -2,11 +2,9 @@ package repo import ( "bufio" - "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" "fmt" - "github.com/unknwon/i18n" "io" "net/http" "os" @@ -16,6 +14,9 @@ import ( "strings" "time" + "code.gitea.io/gitea/modules/timeutil" + "github.com/unknwon/i18n" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -345,6 +346,24 @@ func CloudBrainRestart(ctx *context.Context) { break } + var hasSameResource bool + if gpuInfos == nil { + json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) + } + for _, resourceType := range gpuInfos.GpuInfo { + if resourceType.Queue == task.GpuQueue { + hasSameResource = true + continue + } + } + + if !hasSameResource { + log.Error("has no same resource, can not restart", ctx.Data["MsgID"]) + resultCode = "-1" + errorMsg = "the job's version is too old and can not be restarted" + break + } + count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, string(models.JobTypeDebug)) if err != nil { log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"]) @@ -400,18 +419,48 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo if jobType == models.JobTypeTrain { task, err = models.GetCloudbrainByJobID(ctx.Params(":jobid")) } else { - task, err = models.GetCloudbrainByID(ctx.Params(":id")) + task, err = models.GetCloudbrainByIDWithDeleted(ctx.Params(":id")) } if err != nil { log.Info("error:" + err.Error()) ctx.Data["error"] = err.Error() + return } + result, err := cloudbrain.GetJob(task.JobID) if err != nil { log.Info("error:" + err.Error()) ctx.Data["error"] = err.Error() + return } + + if task.JobType == string(models.JobTypeTrain) { + if cloudbrain.TrainResourceSpecs == nil { + json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) + } + for _, tmp := range cloudbrain.TrainResourceSpecs.ResourceSpec { + if tmp.Id == task.ResourceSpecId { + ctx.Data["GpuNum"] = 
tmp.GpuNum + ctx.Data["CpuNum"] = tmp.CpuNum + ctx.Data["MemMiB"] = tmp.MemMiB + ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB + } + } + } else { + if cloudbrain.ResourceSpecs == nil { + json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs) + } + for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec { + if tmp.Id == task.ResourceSpecId { + ctx.Data["GpuNum"] = tmp.GpuNum + ctx.Data["CpuNum"] = tmp.CpuNum + ctx.Data["MemMiB"] = tmp.MemMiB + ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB + } + } + } + if result != nil { jobRes, _ := models.ConvertToJobResultPayload(result.Payload) jobRes.Resource.Memory = strings.ReplaceAll(jobRes.Resource.Memory, "Mi", "MB") @@ -426,6 +475,15 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo ctx.Data["resource_type"] = resourceType.Value } } + } else { + if gpuInfos == nil { + json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) + } + for _, resourceType := range gpuInfos.GpuInfo { + if resourceType.Queue == jobRes.Config.GpuType { + ctx.Data["resource_type"] = resourceType.Value + } + } } taskRoles := jobRes.TaskRoles if jobRes.JobStatus.State != string(models.JobFailed) { @@ -436,9 +494,15 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo task.ContainerID = taskRes.TaskStatuses[0].ContainerID task.ContainerIp = taskRes.TaskStatuses[0].ContainerIP models.ParseAndSetDurationFromCloudBrainOne(jobRes, task) - err = models.UpdateJob(task) - if err != nil { - ctx.Data["error"] = err.Error() + + if task.DeletedAt.IsZero() { //normal record + err = models.UpdateJob(task) + if err != nil { + ctx.Data["error"] = err.Error() + return + } + } else { //deleted record + } } else { task.Status = jobRes.JobStatus.State @@ -455,7 +519,9 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo ctx.Data["result"] = jobRes } else { log.Info("error:" + err.Error()) + return } + user, err := models.GetUserByID(task.UserID) if err == nil { 
task.User = user @@ -510,6 +576,12 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo } } + attachment, err := models.GetAttachmentByUUID(task.Uuid) + if err == nil { + ctx.Data["datasetname"] = attachment.Name + } else { + ctx.Data["datasetname"] = "" + } ctx.Data["task"] = task ctx.Data["jobName"] = task.JobName @@ -518,7 +590,10 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo version_list_task = append(version_list_task, task) ctx.Data["version_list_task"] = version_list_task ctx.Data["debugListType"] = debugListType - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, task) + ctx.Data["code_path"] = cloudbrain.CodeMountPath + ctx.Data["dataset_path"] = cloudbrain.DataSetMountPath + ctx.Data["model_path"] = cloudbrain.ModelMountPath + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) ctx.HTML(200, tpName) } @@ -1095,6 +1170,7 @@ func SyncCloudbrainStatus() { if task.EndTime == 0 && models.IsModelArtsDebugJobTerminal(task.Status) { task.EndTime = timeutil.TimeStampNow() } + task.CorrectCreateUnix() task.ComputeAndSetDuration() err = models.UpdateJob(task) if err != nil { @@ -1121,7 +1197,7 @@ func SyncCloudbrainStatus() { if task.EndTime == 0 && models.IsTrainJobTerminal(task.Status) && task.StartTime > 0 { task.EndTime = task.StartTime.Add(task.Duration) } - + task.CorrectCreateUnix() err = models.UpdateJob(task) if err != nil { log.Error("UpdateJob(%s) failed:%v", task.JobName, err) @@ -1243,6 +1319,7 @@ func handleNoDurationTask(cloudBrains []*models.Cloudbrain) { task.StartTime = timeutil.TimeStamp(startTime / 1000) task.EndTime = task.StartTime.Add(duration) } + task.CorrectCreateUnix() task.ComputeAndSetDuration() err = models.UpdateJob(task) if err != nil { diff --git a/routers/repo/editor.go b/routers/repo/editor.go index 2fa7976e0..8e13735df 100644 --- a/routers/repo/editor.go +++ b/routers/repo/editor.go @@ -5,6 +5,7 @@ package repo import ( + repo_service 
"code.gitea.io/gitea/services/repository" "encoding/json" "fmt" "io/ioutil" @@ -614,6 +615,19 @@ func UploadFilePost(ctx *context.Context, form auth.UploadRepoFileForm) { message += "\n\n" + form.CommitMessage } + if err := repo_service.CheckPushSizeLimit4Web(ctx.Repo.Repository, form.Files); err != nil { + if repo_service.IsRepoTooLargeErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.repo_too_large", setting.Repository.RepoMaxSize), tplUploadFile, &form) + } else if repo_service.IsUploadFileInvalidErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.repo_file_invalid"), tplUploadFile, &form) + } else if repo_service.IsUploadFileTooMuchErr(err) { + ctx.RenderWithErr(ctx.Tr("repo.editor.upload_file_too_much", setting.Repository.Upload.MaxFiles), tplUploadFile, &form) + } else { + ctx.RenderWithErr(err.Error(), tplUploadFile, &form) + } + return + } + if err := repofiles.UploadRepoFiles(ctx.Repo.Repository, ctx.User, &repofiles.UploadRepoFileOptions{ LastCommitID: ctx.Repo.CommitID, OldBranch: oldBranchName, diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 32d9db9ce..8b785a395 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -2,10 +2,9 @@ package repo import ( "archive/zip" - "code.gitea.io/gitea/modules/notification" - "code.gitea.io/gitea/modules/timeutil" "encoding/json" "errors" + "fmt" "io" "io/ioutil" "net/http" @@ -16,6 +15,9 @@ import ( "time" "unicode/utf8" + "code.gitea.io/gitea/modules/notification" + "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -247,7 +249,7 @@ func NotebookShow(ctx *context.Context) { debugListType := ctx.Query("debugListType") var ID = ctx.Params(":id") - task, err := models.GetCloudbrainByID(ID) + task, err := models.GetCloudbrainByIDWithDeleted(ID) if err != nil { ctx.Data["error"] = err.Error() ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) @@ -262,33 +264,64 @@ func 
NotebookShow(ctx *context.Context) { } if result != nil { - task.Status = result.Status - err = models.UpdateJob(task) - if err != nil { - ctx.Data["error"] = err.Error() - ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) - return - } + if task.DeletedAt.IsZero() { //normal record + if task.Status != result.Status { + task.Status = result.Status + err = models.UpdateJob(task) + if err != nil { + ctx.Data["error"] = err.Error() + ctx.RenderWithErr(err.Error(), tplModelArtsNotebookShow, nil) + return + } + } + } else { //deleted record - result.CreateTime = time.Unix(int64(result.CreateAt/1000), 0).Format("2006-01-02 15:04:05") - result.LatestUpdateTime = time.Unix(int64(result.UpdateAt/1000), 0).Format("2006-01-02 15:04:05") + } } - datasetDownloadLink := "-" + datasetDownloadLink := "" if ctx.IsSigned { if task.Uuid != "" && task.UserID == ctx.User.ID { attachment, err := models.GetAttachmentByUUID(task.Uuid) if err == nil { + task.DatasetName = attachment.Name datasetDownloadLink = attachment.S3DownloadURL() } } } - + user, err := models.GetUserByID(task.UserID) + if err == nil { + task.User = user + } + if modelarts.FlavorInfos == nil { + json.Unmarshal([]byte(setting.FlavorInfos), &modelarts.FlavorInfos) + } + if modelarts.FlavorInfos != nil { + ctx.Data["resource_spec"] = modelarts.FlavorInfos.FlavorInfo[0].Desc + for _, f := range modelarts.FlavorInfos.FlavorInfo { + if fmt.Sprint(f.Value) == task.FlavorCode { + ctx.Data["resource_spec"] = f.Desc + break + } + } + } + if task.TrainJobDuration == "" { + if task.Duration == 0 { + var duration int64 + if task.Status == string(models.JobRunning) { + duration = time.Now().Unix() - int64(task.CreatedUnix) + } else { + duration = int64(task.UpdatedUnix) - int64(task.CreatedUnix) + } + task.Duration = duration + } + task.TrainJobDuration = models.ConvertDurationToStr(task.Duration) + } + ctx.Data["duration"] = task.TrainJobDuration ctx.Data["datasetDownloadLink"] = datasetDownloadLink ctx.Data["task"] = 
task ctx.Data["ID"] = ID ctx.Data["jobName"] = task.JobName - ctx.Data["result"] = result ctx.Data["debugListType"] = debugListType ctx.HTML(200, tplModelArtsNotebookShow) } @@ -406,6 +439,7 @@ func NotebookManage(ctx *context.Context) { param := models.NotebookAction{ Action: action, } + createTime := timeutil.TimeStampNow() res, err := modelarts.ManageNotebook2(task.JobID, param) if err != nil { log.Error("ManageNotebook2(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"]) @@ -432,6 +466,8 @@ func NotebookManage(ctx *context.Context) { Image: task.Image, ComputeResource: task.ComputeResource, Description: task.Description, + CreatedUnix: createTime, + UpdatedUnix: createTime, } err = models.RestartCloudbrain(task, newTask) @@ -1553,7 +1589,7 @@ func TrainJobShow(ctx *context.Context) { ctx.Data["displayJobName"] = VersionListTasks[0].DisplayJobName ctx.Data["version_list_task"] = VersionListTasks ctx.Data["version_list_count"] = VersionListCount - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, &VersionListTasks[0].Cloudbrain) + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, &VersionListTasks[0].Cloudbrain) ctx.HTML(http.StatusOK, tplModelArtsTrainJobShow) } @@ -2184,7 +2220,7 @@ func InferenceJobShow(ctx *context.Context) { ctx.Data["jobName"] = task.JobName ctx.Data["displayJobName"] = task.DisplayJobName ctx.Data["task"] = task - ctx.Data["canDownload"] = cloudbrain.CanDeleteJob(ctx, task) + ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) tempUids := []int64{} tempUids = append(tempUids, task.UserID) diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go index b4adfc347..f8a036feb 100755 --- a/routers/repo/user_data_analysis.go +++ b/routers/repo/user_data_analysis.go @@ -41,20 +41,25 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac "A1": ctx.Tr("user.static.id"), "B1": ctx.Tr("user.static.name"), "C1": ctx.Tr("user.static.codemergecount"), - "D1": 
ctx.Tr("user.static.commitcount"), - "E1": ctx.Tr("user.static.issuecount"), - "F1": ctx.Tr("user.static.commentcount"), - "G1": ctx.Tr("user.static.focusrepocount"), - "H1": ctx.Tr("user.static.starrepocount"), - "I1": ctx.Tr("user.static.logincount"), - "J1": ctx.Tr("user.static.watchedcount"), - "K1": ctx.Tr("user.static.commitcodesize"), - "L1": ctx.Tr("user.static.solveissuecount"), - "M1": ctx.Tr("user.static.encyclopediascount"), - "N1": ctx.Tr("user.static.createrepocount"), - "O1": ctx.Tr("user.static.openiindex"), - "P1": ctx.Tr("user.static.registdate"), - "Q1": ctx.Tr("user.static.countdate"), + "D1": ctx.Tr("user.static.UserIndex"), + "E1": ctx.Tr("user.static.commitcount"), + "F1": ctx.Tr("user.static.issuecount"), + "G1": ctx.Tr("user.static.commentcount"), + "H1": ctx.Tr("user.static.focusrepocount"), + "I1": ctx.Tr("user.static.starrepocount"), + "J1": ctx.Tr("user.static.logincount"), + "K1": ctx.Tr("user.static.watchedcount"), + "L1": ctx.Tr("user.static.commitcodesize"), + "M1": ctx.Tr("user.static.solveissuecount"), + "N1": ctx.Tr("user.static.encyclopediascount"), + "O1": ctx.Tr("user.static.createrepocount"), + "P1": ctx.Tr("user.static.openiindex"), + "Q1": ctx.Tr("user.static.registdate"), + "R1": ctx.Tr("user.static.CloudBrainTaskNum"), + "S1": ctx.Tr("user.static.CloudBrainRunTime"), + "T1": ctx.Tr("user.static.CommitDatasetNum"), + "U1": ctx.Tr("user.static.CommitModelCount"), + "V1": ctx.Tr("user.static.countdate"), } for k, v := range dataHeader { //设置单元格的值 @@ -73,24 +78,29 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID) xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name) xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount) - xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount) - xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount) - xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount) - 
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount) - xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount) - xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount) - xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount) - xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize) - xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount) - xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount) - xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount) - xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex)) + xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex)) + xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount) + xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount) + xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount) + xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount) + xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount) + xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount) + xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount) + xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize) + xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount) + xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount) + xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount) + xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex)) formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05") - xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3]) + xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3]) + xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum) + xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600)) + xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum) + 
xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount) formatTime = userRecord.DataDate - xlsx.SetCellValue(sheetName, "Q"+rows, formatTime) + xlsx.SetCellValue(sheetName, "V"+rows, formatTime) } indexTotal += PAGE_SIZE @@ -115,6 +125,30 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac } } +func QueryMetrics(ctx *context.Context) { + startDate := ctx.Query("startDate") + endDate := ctx.Query("endDate") + startTime, _ := time.ParseInLocation("2006-01-02", startDate, time.Local) + endTime, _ := time.ParseInLocation("2006-01-02", endDate, time.Local) + result, count := models.QueryMetrics(startTime.Unix(), endTime.Unix()) + mapInterface := make(map[string]interface{}) + mapInterface["data"] = result + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) +} + +func QueryRankingList(ctx *context.Context) { + key := ctx.Query("key") + tableName := ctx.Query("tableName") + limit := ctx.QueryInt("limit") + + result, count := models.QueryRankList(key, tableName, limit) + mapInterface := make(map[string]interface{}) + mapInterface["data"] = result + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) +} + func QueryUserStaticCurrentMonth(ctx *context.Context) { queryUserDataPage(ctx, "public.user_business_analysis_current_month", new(models.UserBusinessAnalysisCurrentMonth)) } @@ -208,20 +242,25 @@ func QueryUserStaticDataPage(ctx *context.Context) { "A1": ctx.Tr("user.static.id"), "B1": ctx.Tr("user.static.name"), "C1": ctx.Tr("user.static.codemergecount"), - "D1": ctx.Tr("user.static.commitcount"), - "E1": ctx.Tr("user.static.issuecount"), - "F1": ctx.Tr("user.static.commentcount"), - "G1": ctx.Tr("user.static.focusrepocount"), - "H1": ctx.Tr("user.static.starrepocount"), - "I1": ctx.Tr("user.static.logincount"), - "J1": ctx.Tr("user.static.watchedcount"), - "K1": ctx.Tr("user.static.commitcodesize"), - "L1": ctx.Tr("user.static.solveissuecount"), - "M1": 
ctx.Tr("user.static.encyclopediascount"), - "N1": ctx.Tr("user.static.createrepocount"), - "O1": ctx.Tr("user.static.openiindex"), - "P1": ctx.Tr("user.static.registdate"), - "Q1": ctx.Tr("user.static.countdate"), + "D1": ctx.Tr("user.static.UserIndex"), + "E1": ctx.Tr("user.static.commitcount"), + "F1": ctx.Tr("user.static.issuecount"), + "G1": ctx.Tr("user.static.commentcount"), + "H1": ctx.Tr("user.static.focusrepocount"), + "I1": ctx.Tr("user.static.starrepocount"), + "J1": ctx.Tr("user.static.logincount"), + "K1": ctx.Tr("user.static.watchedcount"), + "L1": ctx.Tr("user.static.commitcodesize"), + "M1": ctx.Tr("user.static.solveissuecount"), + "N1": ctx.Tr("user.static.encyclopediascount"), + "O1": ctx.Tr("user.static.createrepocount"), + "P1": ctx.Tr("user.static.openiindex"), + "Q1": ctx.Tr("user.static.registdate"), + "R1": ctx.Tr("user.static.CloudBrainTaskNum"), + "S1": ctx.Tr("user.static.CloudBrainRunTime"), + "T1": ctx.Tr("user.static.CommitDatasetNum"), + "U1": ctx.Tr("user.static.CommitModelCount"), + "V1": ctx.Tr("user.static.countdate"), } for k, v := range dataHeader { //设置单元格的值 @@ -234,24 +273,28 @@ func QueryUserStaticDataPage(ctx *context.Context) { xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID) xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name) xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount) - xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount) - xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount) - xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount) - xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount) - xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount) - xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount) - xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount) - xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize) - xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount) - xlsx.SetCellValue(sheetName, 
"M"+rows, userRecord.EncyclopediasCount) - xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount) - xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex)) + xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex)) + xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount) + xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount) + xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount) + xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount) + xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount) + xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount) + xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount) + xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize) + xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount) + xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount) + xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount) + xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex)) formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05") - xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3]) - + xlsx.SetCellValue(sheetName, "Q"+rows, formatTime[0:len(formatTime)-3]) + xlsx.SetCellValue(sheetName, "R"+rows, userRecord.CloudBrainTaskNum) + xlsx.SetCellValue(sheetName, "S"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600)) + xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitDatasetNum) + xlsx.SetCellValue(sheetName, "U"+rows, userRecord.CommitModelCount) formatTime = userRecord.DataDate - xlsx.SetCellValue(sheetName, "Q"+rows, formatTime+" 00:01") + xlsx.SetCellValue(sheetName, "V"+rows, formatTime) } //设置默认打开的表单 diff --git a/routers/repo/view.go b/routers/repo/view.go index 320102ba4..b28e21aa1 100755 --- a/routers/repo/view.go +++ b/routers/repo/view.go @@ -247,7 +247,11 @@ func renderDirectory(ctx 
*context.Context, treeLink string) { ctx.Data["ReadmeInList"] = true ctx.Data["ReadmeExist"] = true ctx.Data["FileIsSymlink"] = readmeFile.isSymlink - ctx.Data["ReadmeName"] = readmeFile.name + if ctx.Repo.TreePath == "" { + ctx.Data["ReadmeRelativePath"] = readmeFile.name + } else { + ctx.Data["ReadmeRelativePath"] = ctx.Repo.TreePath + "/" + readmeFile.name + } if ctx.Repo.CanEnableEditor() { ctx.Data["CanEditFile"] = true @@ -579,11 +583,11 @@ func safeURL(address string) string { } type ContributorInfo struct { - UserInfo *models.User // nil for contributor who is not a registered user - RelAvatarLink string `json:"rel_avatar_link"` - UserName string `json:"user_name"` - Email string `json:"email"` - CommitCnt int `json:"commit_cnt"` + UserInfo *models.User // nil for contributor who is not a registered user + RelAvatarLink string `json:"rel_avatar_link"` + UserName string `json:"user_name"` + Email string `json:"email"` + CommitCnt int `json:"commit_cnt"` } type GetContributorsInfo struct { @@ -642,7 +646,7 @@ func Home(ctx *context.Context) { existedContributorInfo.CommitCnt += c.CommitCnt } else { var newContributor = &ContributorInfo{ - user, "", "",c.Email, c.CommitCnt, + user, "", "", c.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) @@ -839,7 +843,7 @@ func renderCode(ctx *context.Context) { compareInfo, err = baseGitRepo.GetCompareInfo(ctx.Repo.Repository.RepoPath(), ctx.Repo.BranchName, ctx.Repo.Repository.BaseRepo.DefaultBranch) ctx.Data["UpstreamSameBranchName"] = false } - if err==nil && compareInfo != nil { + if err == nil && compareInfo != nil { if compareInfo.Commits != nil { log.Info("compareInfoCommits数量:%d", compareInfo.Commits.Len()) ctx.Data["FetchUpstreamCnt"] = compareInfo.Commits.Len() @@ -950,7 +954,7 @@ func ContributorsAPI(ctx *context.Context) { } else { // new committer info var newContributor = &ContributorInfo{ - user, user.RelAvatarLink(),user.Name, user.Email,c.CommitCnt, + user, 
user.RelAvatarLink(), user.Name, user.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) @@ -963,7 +967,7 @@ func ContributorsAPI(ctx *context.Context) { existedContributorInfo.CommitCnt += c.CommitCnt } else { var newContributor = &ContributorInfo{ - user, "", "",c.Email,c.CommitCnt, + user, "", "", c.Email, c.CommitCnt, } count++ contributorInfos = append(contributorInfos, newContributor) diff --git a/routers/search.go b/routers/search.go index bc1bc5fac..1cf78666e 100644 --- a/routers/search.go +++ b/routers/search.go @@ -68,23 +68,23 @@ func SearchApi(ctx *context.Context) { if OnlySearchLabel { searchRepoByLabel(ctx, Key, Page, PageSize) } else { - searchRepo(ctx, "repository-es-index", Key, Page, PageSize, OnlyReturnNum) + searchRepo(ctx, "repository-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum) } return } else if TableName == "issue" { - searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "f") + searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "f") return } else if TableName == "user" { - searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, true, OnlyReturnNum) + searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, true, OnlyReturnNum) return } else if TableName == "org" { - searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, false, OnlyReturnNum) + searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, false, OnlyReturnNum) return } else if TableName == "dataset" { - searchDataSet(ctx, "dataset-es-index", Key, Page, PageSize, OnlyReturnNum) + searchDataSet(ctx, "dataset-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum) return } else if TableName == "pr" { - searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "t") + searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "t") 
//searchPR(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum) return } @@ -573,7 +573,8 @@ func trimFontHtml(text []rune) string { startRune := rune('<') endRune := rune('>') count := 0 - for i := 0; i < len(text); i++ { + i := 0 + for ; i < len(text); i++ { if text[i] == startRune { //start < re := false j := i + 1 @@ -592,11 +593,14 @@ func trimFontHtml(text []rune) string { } else { return string(text[0:i]) } - } } } - return string(text) + if count%2 == 1 { + return string(text[0:i]) + "" + } else { + return string(text[0:i]) + } } func trimHrefHtml(result string) string { @@ -1125,7 +1129,7 @@ func makePrivateIssueOrPr(issues []*models.Issue, res *SearchRes, Key string, la record["num_comments"] = issue.NumComments record["is_closed"] = issue.IsClosed record["updated_unix"] = issue.UpdatedUnix - record["updated_html"] = timeutil.TimeSinceUnix(repo.UpdatedUnix, language) + record["updated_html"] = timeutil.TimeSinceUnix(issue.UpdatedUnix, language) res.Result = append(res.Result, record) } } diff --git a/services/repository/repository.go b/services/repository/repository.go index cea16516a..d0cd52653 100644 --- a/services/repository/repository.go +++ b/services/repository/repository.go @@ -8,6 +8,7 @@ import ( "fmt" "io/ioutil" "net/http" + "os" "strings" "code.gitea.io/gitea/models" @@ -172,3 +173,137 @@ func RecommendFromPromote(url string) ([]string, error) { } return result, nil } + +func CheckPushSizeLimit4Web(repo *models.Repository, fileIds []string) error { + if err := CheckRepoNumOnceLimit(len(fileIds)); err != nil { + return err + } + totalSize, err := CountUploadFileSizeByIds(fileIds) + if err != nil { + return UploadFileInvalidErr{} + } + if err := CheckRepoTotalSizeLimit(repo, totalSize); err != nil { + return err + } + return nil +} + +func CheckPushSizeLimit4Http(repo *models.Repository, uploadFileSize int64) error { + if err := CheckRepoOnceTotalSizeLimit(uploadFileSize); err != nil { + return err + } + if err := 
CheckRepoTotalSizeLimit(repo, uploadFileSize); err != nil { + return err + } + return nil +} + +func CheckRepoTotalSizeLimit(repo *models.Repository, uploadFileSize int64) error { + if repo.Size+uploadFileSize > setting.Repository.RepoMaxSize*1024*1024 { + return RepoTooLargeErr{} + } + return nil +} + +func CheckRepoOnceTotalSizeLimit(uploadFileSize int64) error { + if uploadFileSize > setting.Repository.Upload.TotalMaxSize*1024*1024 { + return UploadFileTooLargeErr{} + } + return nil +} + +func CheckRepoNumOnceLimit(uploadFileNum int) error { + if uploadFileNum > setting.Repository.Upload.MaxFiles { + return UploadFileTooMuchErr{} + } + return nil +} + +func CountUploadFileSizeByIds(fileIds []string) (int64, error) { + if len(fileIds) == 0 { + return 0, nil + } + uploads, err := models.GetUploadsByUUIDs(fileIds) + if err != nil { + return 0, fmt.Errorf("CountUploadFileSizeByIds error [uuids: %v]: %v", fileIds, err) + } + var totalSize int64 + for _, upload := range uploads { + size, err := GetUploadFileSize(upload) + if err != nil { + return 0, err + } + totalSize += size + } + return totalSize, nil +} + +func GetUploadFileSize(upload *models.Upload) (int64, error) { + info, err := os.Lstat(upload.LocalPath()) + + if err != nil { + return 0, err + } + return info.Size(), nil + +} + +type RepoTooLargeErr struct { +} + +func (RepoTooLargeErr) Error() string { + return fmt.Sprintf("Repository can not exceed %d MB. 
Please remove some unnecessary files and try again", setting.Repository.RepoMaxSize) +} + +func IsRepoTooLargeErr(err error) bool { + _, ok := err.(RepoTooLargeErr) + return ok +} + +type UploadFileTooLargeErr struct { +} + +func (UploadFileTooLargeErr) Error() string { + return fmt.Sprintf("Upload files can not exceed %d MB at a time", setting.Repository.Upload.TotalMaxSize) +} + +func IsUploadFileTooLargeErr(err error) bool { + _, ok := err.(UploadFileTooLargeErr) + return ok +} + +type RepoFileTooLargeErr struct { +} + +func (RepoFileTooLargeErr) Error() string { + return "repository file is too large" +} + +func IsRepoFileTooLargeErr(err error) bool { + _, ok := err.(RepoFileTooLargeErr) + return ok +} + +type UploadFileTooMuchErr struct { +} + +func (UploadFileTooMuchErr) Error() string { + return "upload files are too much" +} + +func IsUploadFileTooMuchErr(err error) bool { + _, ok := err.(UploadFileTooMuchErr) + return ok +} + +type UploadFileInvalidErr struct { +} + +func (UploadFileInvalidErr) Error() string { + return "upload files are invalid" +} + +func IsUploadFileInvalidErr(err error) bool { + _, ok := err.(UploadFileInvalidErr) + return ok +} diff --git a/templates/admin/cloudbrain/list.tmpl b/templates/admin/cloudbrain/list.tmpl old mode 100644 new mode 100755 index 39b2c21de..6fea2eef7 --- a/templates/admin/cloudbrain/list.tmpl +++ b/templates/admin/cloudbrain/list.tmpl @@ -81,7 +81,7 @@ {{.DisplayJobName}} {{else if eq .JobType "TRAIN"}} - + {{.DisplayJobName}} {{else if eq .JobType "BENCHMARK"}} @@ -155,13 +155,13 @@ {{else}} - + {{$.i18n.Tr "repo.stop"}} {{end}} -
+ {{$.CsrfTokenHtml}} {{$.i18n.Tr "repo.delete"}} diff --git a/templates/base/head_notice.tmpl b/templates/base/head_notice.tmpl index 88615cc38..43c581e6e 100644 --- a/templates/base/head_notice.tmpl +++ b/templates/base/head_notice.tmpl @@ -1,7 +1,7 @@ {{if not .IsCourse}} {{ if .notices}}
-
+
{{ $firstTag := true }} {{range .notices.Notices}} @@ -25,7 +25,7 @@
- + {{end}} diff --git a/templates/custom/select_dataset.tmpl b/templates/custom/select_dataset.tmpl index dc5ca6c9e..273477dd2 100644 --- a/templates/custom/select_dataset.tmpl +++ b/templates/custom/select_dataset.tmpl @@ -135,4 +135,4 @@ - \ No newline at end of file + diff --git a/templates/custom/select_dataset_train.tmpl b/templates/custom/select_dataset_train.tmpl new file mode 100644 index 000000000..2771200b6 --- /dev/null +++ b/templates/custom/select_dataset_train.tmpl @@ -0,0 +1,134 @@ + + +
+     + + + {{.i18n.Tr "dataset.select_dataset"}} + +
+ + +
+ + + +
+
+
${dataset.Repo.OwnerName}/${dataset.Repo.Alias} ${dataset.Name}
+
+ + + + ${dataset.Description} +
+
+
+ + + + 解压中 + + + + 解压失败 + +
+
+ + +
+ +
+
+
${dataset.Repo.OwnerName}/${dataset.Repo.Alias}${dataset.Name}
+
+ + + + ${dataset.Description} +
+
+
+ + + + 解压中 + + + + 解压失败 + +
+
+ +
+ +
+
+
${dataset.Repo.OwnerName}/${dataset.Repo.Alias}${dataset.Name}
+
+ + + + ${dataset.Description} +
+
+
+ + + + 解压中 + + + + 解压失败 + +
+
+ +
+ +
+
+
${dataset.Repo.OwnerName}/${dataset.Repo.Alias}${dataset.Name}
+
+ + + + ${dataset.Description} +
+
+
+ + + + 解压中 + + + + 解压失败 + +
+
+ +
+
+
+ + +
+
+ + +
diff --git a/templates/explore/datasets.tmpl b/templates/explore/datasets.tmpl index ff18a3da3..a1692cf35 100644 --- a/templates/explore/datasets.tmpl +++ b/templates/explore/datasets.tmpl @@ -121,10 +121,10 @@ @@ -184,9 +184,15 @@
+ {{if eq .UserID 0}} + {{else}} + + + + {{end}} 创建于:{{TimeSinceUnix1 .CreatedUnix}}
@@ -215,4 +221,4 @@ -{{template "base/footer" .}} +{{template "base/footer" .}} \ No newline at end of file diff --git a/templates/explore/organizations.tmpl b/templates/explore/organizations.tmpl index 1151c5a94..5faf039af 100644 --- a/templates/explore/organizations.tmpl +++ b/templates/explore/organizations.tmpl @@ -1,68 +1,279 @@ + + + + + + {{template "base/head" .}}
- {{template "explore/search" .}} - -
-
- {{template "explore/navbar" .}} -
-

- {{.i18n.Tr "explore.organizations"}} -

-