diff --git a/custom/public/css/git.openi.css b/custom/public/css/git.openi.css index c6ada7b28..a4920eec0 100644 --- a/custom/public/css/git.openi.css +++ b/custom/public/css/git.openi.css @@ -44,12 +44,6 @@ -webkit-line-clamp: 2; -webkit-box-orient: vertical; } -.ui.label{ - font-weight: normal; -} -.active { - color: #0366D6 !important; -} .opacity5{ opacity:0.5;} .radius15{ border-radius:1.5rem !important; } @@ -287,70 +281,6 @@ position: relative; } -/**seach**/ -/**搜索导航条适配窄屏**/ -.seachnav{ - overflow-x: auto; - overflow-y: hidden; - scrollbar-width: none; /* firefox */ - -ms-overflow-style: none; /* IE 10+ */ -} -.seachnav::-webkit-scrollbar { - display: none; /* Chrome Safari */ -} -.ui.green.button, .ui.green.buttons .button{ - background-color: #5BB973; -} -.seach .repos--seach{ - padding-bottom: 0; - border-bottom: none; -} -.seach .ui.secondary.pointing.menu{ - border-bottom: none; -} -.seach .ui.secondary.pointing.menu .item > i{ - margin-right: 5px; -} -.seach .ui.secondary.pointing.menu .active.item{ - border-bottom-width: 2px; - margin: 0 0 -1px; -} -.seach .ui.menu .active.item>.label { - background: #1684FC; - color: #FFF; -} -.seach .ui.menu .item>.label:not(.active.item>.label) { - background: #e8e8e8; - color: rgba(0,0,0,.6); -} - -.highlight{ - color: red; -} -.ui.list .list>.item>img.image+.content, .ui.list>.item>img.image+.content { - width: calc(100% - 3.0em); - margin-left: 0; -} - -.seach .ui.list .list>.item .header, .seach .ui.list>.item .header{ - margin-bottom: 0.5em; - font-size: 1.4rem !important; - font-weight: normal; -} -.seach .time, .seach .time a{ - font-size: 12px; - color: grey; -} - -.seach .list .item.members .ui.avatar.image { - width: 3.2em; - height: 3.2em; -} -.ui.list .list>.item.members>img.image+.content, .ui.list>.item.members>img.image+.content { - width: calc(100% - 4.0em); - margin-left: 0; -} - @media only screen and (max-width: 767px) { .am-mt-30{ margin-top: 1.5rem !important;} .ui.secondary.hometop.segment{ diff --git a/models/action.go b/models/action.go index 2a9d88399..9b92b4192 100755 --- a/models/action.go +++ b/models/action.go @@ -57,6 +57,7 @@ const ( ActionCreateInferenceTask // 28 ActionCreateBenchMarkTask //29 ActionCreateNewModelTask //30 + ActionCreateGPUTrainTask //31 ) // Action represents user operation type and other information to diff --git a/models/attachment.go b/models/attachment.go index a3fc6fa01..7c95a73dd 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -51,6 +51,7 @@ type Attachment struct { FileChunk *FileChunk `xorm:"-"` CanDel bool `xorm:"-"` Uploader *User `xorm:"-"` + Md5 string `xorm:"-"` } type AttachmentUsername struct { diff --git a/models/cloudbrain.go b/models/cloudbrain.go index ea6d0338e..4a82a2031 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -20,9 +20,17 @@ type CloudbrainStatus string type JobType string type ModelArtsJobStatus string +const ( + TypeCloudBrainOne int = iota + TypeCloudBrainTwo + + TypeCloudBrainAll = -1 +) + const ( NPUResource = "NPU" GPUResource = "CPU/GPU" + AllResource = "all" //notebook storage category EVSCategory = "EVS" @@ -87,6 +95,8 @@ const ( ModelArtsTrainJobCheckRunning ModelArtsJobStatus = "CHECK_RUNNING" //审核作业正在运行中 ModelArtsTrainJobCheckRunningCompleted ModelArtsJobStatus = "CHECK_RUNNING_COMPLETED" //审核作业已经完成 ModelArtsTrainJobCheckFailed ModelArtsJobStatus = "CHECK_FAILED" //审核作业失败 + + DURATION_STR_ZERO = "00:00:00" ) type Cloudbrain struct { @@ -160,7 +170,9 @@ func (task *Cloudbrain) ComputeAndSetDuration() { if 
task.StartTime == 0 { d = 0 } else if task.EndTime == 0 { - d = time.Now().Unix() - task.StartTime.AsTime().Unix() + if !task.IsTerminal() { + d = time.Now().Unix() - task.StartTime.AsTime().Unix() + } } else { d = task.EndTime.AsTime().Unix() - task.StartTime.AsTime().Unix() } @@ -172,9 +184,14 @@ func (task *Cloudbrain) ComputeAndSetDuration() { task.TrainJobDuration = ConvertDurationToStr(d) } +func (task *Cloudbrain) IsTerminal() bool { + status := task.Status + return status == string(ModelArtsTrainJobCompleted) || status == string(ModelArtsTrainJobFailed) || status == string(ModelArtsTrainJobKilled) || status == string(ModelArtsStopped) || status == string(JobStopped) || status == string(JobFailed) || status == string(JobSucceeded) +} + func ConvertDurationToStr(duration int64) string { if duration == 0 { - return "00:00:00" + return DURATION_STR_ZERO } return util.AddZero(duration/3600) + ":" + util.AddZero(duration%3600/60) + ":" + util.AddZero(duration%60) } @@ -191,6 +208,19 @@ func IsCloudBrainOneDebugJobTerminal(status string) bool { return status == string(JobStopped) || status == string(JobFailed) || status == string(JobSucceeded) } +func ParseAndSetDurationFromCloudBrainOne(result JobResultPayload, task *Cloudbrain) { + isActivated := result.JobStatus.CreatedTime > 0 + if task.StartTime == 0 && isActivated { + task.StartTime = timeutil.TimeStamp(result.JobStatus.CreatedTime / 1000) + } + if task.EndTime == 0 && IsCloudBrainOneDebugJobTerminal(task.Status) && isActivated { + if result.JobStatus.CompletedTime > 0 { + task.EndTime = timeutil.TimeStamp(result.JobStatus.CompletedTime / 1000) + } + } + task.ComputeAndSetDuration() +} + type CloudbrainInfo struct { Cloudbrain `xorm:"extends"` User `xorm:"extends"` @@ -1323,6 +1353,7 @@ func CloudbrainsVersionList(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int, e } func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) { + cloudbrain.TrainJobDuration = DURATION_STR_ZERO if _, err = x.Insert(cloudbrain); err != nil { return err } @@ -1339,6 +1370,16 @@ func getRepoCloudBrain(cb *Cloudbrain) (*Cloudbrain, error) { return cb, nil } +func getRepoCloudBrainWithDeleted(cb *Cloudbrain) (*Cloudbrain, error) { + has, err := x.Unscoped().Get(cb) + if err != nil { + return nil, err + } else if !has { + return nil, ErrJobNotExist{} + } + return cb, nil +} + func GetRepoCloudBrainByJobID(repoID int64, jobID string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, RepoID: repoID} return getRepoCloudBrain(cb) @@ -1355,6 +1396,12 @@ func GetCloudbrainByID(id string) (*Cloudbrain, error) { return getRepoCloudBrain(cb) } +func GetCloudbrainByIDWithDeleted(id string) (*Cloudbrain, error) { + idInt64, _ := strconv.ParseInt(id, 10, 64) + cb := &Cloudbrain{ID: idInt64} + return getRepoCloudBrainWithDeleted(cb) +} + func GetCloudbrainByJobIDAndVersionName(jobID string, versionName string) (*Cloudbrain, error) { cb := &Cloudbrain{JobID: jobID, VersionName: versionName} return getRepoCloudBrain(cb) @@ -1467,6 +1514,15 @@ func GetCloudBrainUnStoppedJob() ([]*Cloudbrain, error) { Find(&cloudbrains) } +func GetStoppedJobWithNoDurationJob() ([]*Cloudbrain, error) { + cloudbrains := make([]*Cloudbrain, 0) + return cloudbrains, x. + In("status", ModelArtsTrainJobCompleted, ModelArtsTrainJobFailed, ModelArtsTrainJobKilled, ModelArtsStopped, JobStopped, JobFailed, JobSucceeded). + Where("train_job_duration is null or train_job_duration = '' "). + Limit(100). 
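// Minimal test-style sketch of the duration helpers above (illustrative, not
// part of the patch; it would live next to this file in package models and
// import "testing"):
//
//	func TestConvertDurationToStr(t *testing.T) {
//		if got := ConvertDurationToStr(0); got != DURATION_STR_ZERO {
//			t.Errorf("want %q, got %q", DURATION_STR_ZERO, got)
//		}
//		// 3661s = 1h 1m 1s
//		if got := ConvertDurationToStr(3661); got != "01:01:01" {
//			t.Errorf("want 01:01:01, got %q", got)
//		}
//	}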
+ Find(&cloudbrains) +} + func GetCloudbrainCountByUserID(userID int64, jobType string) (int, error) { count, err := x.In("status", JobWaiting, JobRunning).And("job_type = ? and user_id = ? and type = ?", jobType, userID, TypeCloudBrainOne).Count(new(Cloudbrain)) return int(count), err diff --git a/models/dataset.go b/models/dataset.go index af47c53fe..95800100c 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -1,6 +1,7 @@ package models import ( + "code.gitea.io/gitea/modules/log" "errors" "fmt" "sort" @@ -62,19 +63,20 @@ func (datasets DatasetList) loadAttributes(e Engine) error { } set := make(map[int64]struct{}) + userIdSet := make(map[int64]struct{}) datasetIDs := make([]int64, len(datasets)) for i := range datasets { - set[datasets[i].UserID] = struct{}{} + userIdSet[datasets[i].UserID] = struct{}{} set[datasets[i].RepoID] = struct{}{} datasetIDs[i] = datasets[i].ID } // Load owners. - users := make(map[int64]*User, len(set)) + users := make(map[int64]*User, len(userIdSet)) repos := make(map[int64]*Repository, len(set)) if err := e. Where("id > 0"). - In("id", keysInt64(set)). + In("id", keysInt64(userIdSet)). Find(&users); err != nil { return fmt.Errorf("find users: %v", err) } @@ -139,20 +141,7 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { var cond = builder.NewCond() cond = cond.And(builder.Neq{"dataset.status": DatasetStatusDeleted}) - if len(opts.Keyword) > 0 { - cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword})) - } - - if len(opts.Category) > 0 { - cond = cond.And(builder.Eq{"dataset.category": opts.Category}) - } - - if len(opts.Task) > 0 { - cond = cond.And(builder.Eq{"dataset.task": opts.Task}) - } - if len(opts.License) > 0 { - cond = cond.And(builder.Eq{"dataset.license": opts.License}) - } + cond = generateFilterCond(opts, cond) if opts.RepoID > 0 { cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID}) @@ -162,14 +151,12 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic}) cond = cond.And(builder.Eq{"attachment.is_private": false}) if opts.OwnerID > 0 { - if len(opts.Keyword) == 0 { - cond = cond.Or(builder.Eq{"repository.owner_id": opts.OwnerID}) - } else { - subCon := builder.NewCond() - subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID}, builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword})) - cond = cond.Or(subCon) - - } + + subCon := builder.NewCond() + subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID}) + subCon = generateFilterCond(opts, subCon) + cond = cond.Or(subCon) + } } else if opts.OwnerID > 0 { cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID}) @@ -182,6 +169,25 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { return cond } +func generateFilterCond(opts *SearchDatasetOptions, cond builder.Cond) builder.Cond { + if len(opts.Keyword) > 0 { + cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword})) + } + + if len(opts.Category) > 0 { + cond = cond.And(builder.Eq{"dataset.category": opts.Category}) + } + + if len(opts.Task) > 0 { + cond = cond.And(builder.Eq{"dataset.task": opts.Task}) + } + if len(opts.License) > 0 { + cond = cond.And(builder.Eq{"dataset.license": opts.License}) + } + + return cond +} + func SearchDatasetByCondition(opts *SearchDatasetOptions, cond 
builder.Cond) (DatasetList, int64, error) { if opts.Page <= 0 { opts.Page = 1 @@ -292,7 +298,13 @@ func getDatasetAttachments(e Engine, typeCloudBrain int, isSigned bool, user *Us if err != nil { return err } - attachment.FileChunk = fileChunks[0] + if len(fileChunks) > 0 { + attachment.Md5 = fileChunks[0].Md5 + } else { + log.Error("has attachment record, but has no file_chunk record") + attachment.Md5 = "no_record" + } + attachment.CanDel = CanDelAttachment(isSigned, user, attachment) sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment) } @@ -348,7 +360,7 @@ func GetDatasetByRepo(repo *Repository) (*Dataset, error) { if has { return dataset, nil } else { - return nil, errors.New("Not Found") + return nil, ErrNotExist{repo.ID} } } diff --git a/models/dbsql/dataset_foreigntable_for_es.sql b/models/dbsql/dataset_foreigntable_for_es.sql new file mode 100644 index 000000000..02e5f0ddf --- /dev/null +++ b/models/dbsql/dataset_foreigntable_for_es.sql @@ -0,0 +1,199 @@ +DELETE FROM public.dataset_es; +DROP FOREIGN TABLE public.dataset_es; +DROP TRIGGER IF EXISTS es_insert_dataset on public.dataset; +DROP FUNCTION public.insert_dataset_data(); +DROP TRIGGER IF EXISTS es_udpate_dataset_file_name on public.attachment; +DROP FUNCTION public.udpate_dataset_file_name; + +DROP TRIGGER IF EXISTS es_update_dataset on public.dataset; +DROP FUNCTION public.update_dataset; + +DROP TRIGGER IF EXISTS es_delete_dataset on public.dataset; +DROP FUNCTION public.delete_dataset; + + +CREATE FOREIGN TABLE public.dataset_es +( + id bigint NOT NULL, + title character varying(255), + status integer, + category character varying(255), + description text, + download_times bigint, + license character varying(255), + task character varying(255), + release_id bigint, + user_id bigint, + repo_id bigint, + created_unix bigint, + updated_unix bigint, + file_name text, + file_desc text +)SERVER multicorn_es +OPTIONS + ( + host '192.168.207.94', + port '9200', + index 'dataset-es-index', + rowid_column 'id', + default_sort '_id' + ) +; +DELETE FROM public.dataset_es; + INSERT INTO public.dataset_es( + id, + title, + status, + category, + description, + download_times, + license, task, + release_id, + user_id, + repo_id, + created_unix, + updated_unix, + file_name, + file_desc + ) + SELECT + b.id, + b.title, + b.status, + b.category, + b.description, + b.download_times, + b.license, + b.task, + b.release_id, + b.user_id, + b.repo_id, + b.created_unix, + b.updated_unix, + (select array_to_string(array_agg(name order by created_unix desc),'-#,#-') from public.attachment a where a.dataset_id=b.id and a.is_private=false), + (select array_to_string(array_agg(description order by created_unix desc),'-#,#-') from public.attachment a where a.dataset_id=b.id and a.is_private=false) + FROM public.dataset b,public.repository c where b.repo_id=c.id and c.is_private=false; + + +DROP TRIGGER IF EXISTS es_insert_dataset on public.dataset; + +CREATE OR REPLACE FUNCTION public.insert_dataset_data() RETURNS trigger AS +$def$ + DECLARE + privateValue boolean=false; + BEGIN + select into privateValue is_private from public.repository where id=NEW.repo_id; + if not privateValue then + INSERT INTO public.dataset_es( + id, + title, + status, + category, + description, + download_times, + license, + task, + release_id, + user_id, + repo_id, + created_unix, + updated_unix) + VALUES ( + NEW.id, + NEW.title, + NEW.status, + NEW.category, + NEW.description, + NEW.download_times, + NEW.license, + NEW.task, 
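-- Illustrative consistency check for the backfill above: after the initial
-- INSERT ... SELECT, source and ES row counts for public repositories should match.
-- SELECT (SELECT count(*) FROM public.dataset b, public.repository c
--          WHERE b.repo_id = c.id AND c.is_private = false) AS src_rows,
--        (SELECT count(*) FROM public.dataset_es)           AS es_rows;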
+ NEW.release_id, + NEW.user_id, + NEW.repo_id, + NEW.created_unix, + NEW.updated_unix + ); + end if; + RETURN NEW; + END; +$def$ +LANGUAGE plpgsql; + + + +CREATE TRIGGER es_insert_dataset + AFTER INSERT ON public.dataset + FOR EACH ROW EXECUTE PROCEDURE insert_dataset_data(); + +ALTER TABLE public.dataset ENABLE ALWAYS TRIGGER es_insert_dataset; + + +DROP TRIGGER IF EXISTS es_udpate_dataset_file_name on public.attachment; + +CREATE OR REPLACE FUNCTION public.udpate_dataset_file_name() RETURNS trigger AS +$def$ + BEGIN + if (TG_OP = 'UPDATE') then + update public.dataset_es SET file_desc=(select array_to_string(array_agg(description order by created_unix desc),'-#,#-') from public.attachment where dataset_id=NEW.dataset_id and is_private=false) where id=NEW.dataset_id; + elsif (TG_OP = 'INSERT') then + update public.dataset_es SET file_name=(select array_to_string(array_agg(name order by created_unix desc),'-#,#-') from public.attachment where dataset_id=NEW.dataset_id and is_private=false) where id=NEW.dataset_id; + elsif (TG_OP = 'DELETE') then + update public.dataset_es SET file_name=(select array_to_string(array_agg(name order by created_unix desc),'-#,#-') from public.attachment where dataset_id=OLD.dataset_id and is_private=false) where id=OLD.dataset_id; + update public.dataset_es SET file_desc=(select array_to_string(array_agg(description order by created_unix desc),'-#,#-') from public.attachment where dataset_id=OLD.dataset_id and is_private=false) where id=OLD.dataset_id; + end if; + return NEW; + END; +$def$ +LANGUAGE plpgsql; + + +CREATE TRIGGER es_udpate_dataset_file_name + AFTER INSERT OR UPDATE OR DELETE ON public.attachment + FOR EACH ROW EXECUTE PROCEDURE udpate_dataset_file_name(); + +ALTER TABLE public.attachment ENABLE ALWAYS TRIGGER es_udpate_dataset_file_name; + +DROP TRIGGER IF EXISTS es_update_dataset on public.dataset; + +CREATE OR REPLACE FUNCTION public.update_dataset() RETURNS trigger AS +$def$ + BEGIN + UPDATE public.dataset_es + SET description=NEW.description, + title=NEW.title, + category=NEW.category, + task=NEW.task, + download_times=NEW.download_times, + updated_unix=NEW.updated_unix, + file_name=(select array_to_string(array_agg(name order by created_unix desc),'-#,#-') from public.attachment where dataset_id=NEW.id and is_private=false), + file_desc=(select array_to_string(array_agg(description order by created_unix desc),'-#,#-') from public.attachment where dataset_id=NEW.id and is_private=false) + where id=NEW.id; + return new; + END +$def$ +LANGUAGE plpgsql; + +CREATE TRIGGER es_update_dataset + AFTER UPDATE ON public.dataset + FOR EACH ROW EXECUTE PROCEDURE update_dataset(); + +ALTER TABLE public.dataset ENABLE ALWAYS TRIGGER es_update_dataset; + +DROP TRIGGER IF EXISTS es_delete_dataset on public.dataset; + +CREATE OR REPLACE FUNCTION public.delete_dataset() RETURNS trigger AS +$def$ + declare + BEGIN + DELETE FROM public.dataset_es where id=OLD.id; + return new; + END +$def$ +LANGUAGE plpgsql; + + +CREATE TRIGGER es_delete_dataset + AFTER DELETE ON public.dataset + FOR EACH ROW EXECUTE PROCEDURE delete_dataset(); + +ALTER TABLE public.dataset ENABLE ALWAYS TRIGGER es_delete_dataset; diff --git a/models/dbsql/issue_foreigntable_for_es.sql b/models/dbsql/issue_foreigntable_for_es.sql new file mode 100644 index 000000000..d6a16cd27 --- /dev/null +++ b/models/dbsql/issue_foreigntable_for_es.sql @@ -0,0 +1,227 @@ +delete from public.issue_es; +DROP FOREIGN TABLE public.issue_es; +DROP TRIGGER IF EXISTS es_insert_issue on public.issue; +DROP FUNCTION 
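-- Illustrative check that the ES sync triggers created by these scripts are
-- installed and always-enabled ('A' in pg_trigger.tgenabled):
-- SELECT tgname, tgenabled FROM pg_trigger WHERE tgname LIKE 'es_%';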
public.insert_issue_data; +DROP TRIGGER IF EXISTS es_udpate_issue_comment on public.comment; +DROP FUNCTION udpate_issue_comment; +DROP TRIGGER IF EXISTS es_update_issue on public.issue; +DROP FUNCTION public.update_issue; +DROP TRIGGER IF EXISTS es_delete_issue on public.issue; +DROP FUNCTION public.delete_issue; + + +CREATE FOREIGN TABLE public.issue_es +( + id bigint NOT NULL, + repo_id bigint, + index bigint, + poster_id bigint, + original_author character varying(255), + original_author_id bigint, + name character varying(255) , + content text, + comment text, + milestone_id bigint, + priority integer, + is_closed boolean, + is_pull boolean, + pr_id bigint, + num_comments integer, + ref character varying(255), + deadline_unix bigint, + created_unix bigint, + updated_unix bigint, + closed_unix bigint, + is_locked boolean NOT NULL, + amount bigint, + is_transformed boolean NOT NULL +)SERVER multicorn_es +OPTIONS + ( + host '192.168.207.94', + port '9200', + index 'issue-es-index', + rowid_column 'id', + default_sort '_id' + ) +; + +delete from public.issue_es; +INSERT INTO public.issue_es( + id, + repo_id, + index, + poster_id, + original_author, + original_author_id, + name, + content, + milestone_id, + priority, + is_closed, + is_pull, + num_comments, + ref, + deadline_unix, + created_unix, + updated_unix, + closed_unix, + is_locked, + amount, + is_transformed,comment,pr_id) + SELECT + b.id, + b.repo_id, + b.index, + b.poster_id, + b.original_author, + b.original_author_id, + b.name, + b.content, + b.milestone_id, + b.priority, + b.is_closed, + b.is_pull, + b.num_comments, + b.ref, + b.deadline_unix, + b.created_unix, + b.updated_unix, + b.closed_unix, + b.is_locked, + b.amount, + b.is_transformed, + (select array_to_string(array_agg(content order by created_unix desc),',') from public.comment a where a.issue_id=b.id), + (select id from public.pull_request d where b.id=d.issue_id and b.is_pull=true) + FROM public.issue b,public.repository c where b.repo_id=c.id and c.is_private=false; + + +CREATE OR REPLACE FUNCTION public.insert_issue_data() RETURNS trigger AS +$def$ + DECLARE + privateValue boolean=false; + BEGIN + select into privateValue is_private from public.repository where id=NEW.repo_id; + if not privateValue then + INSERT INTO public.issue_es( + id, + repo_id, + index, + poster_id, + original_author, + original_author_id, + name, + content, + milestone_id, + priority, + is_closed, + is_pull, + num_comments, + ref, + deadline_unix, + created_unix, + updated_unix, + closed_unix, + is_locked, + amount, + is_transformed) + VALUES ( + NEW.id, + NEW.repo_id, + NEW.index, + NEW.poster_id, + NEW.original_author, + NEW.original_author_id, + NEW.name, + NEW.content, + NEW.milestone_id, + NEW.priority, + NEW.is_closed, + NEW.is_pull, + NEW.num_comments, + NEW.ref, + NEW.deadline_unix, + NEW.created_unix, + NEW.updated_unix, + NEW.closed_unix, + NEW.is_locked, + NEW.amount, + NEW.is_transformed + ); + end if; + RETURN NEW; + END; +$def$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS es_insert_issue on public.issue; + +CREATE TRIGGER es_insert_issue + AFTER INSERT ON public.issue + FOR EACH ROW EXECUTE PROCEDURE insert_issue_data(); + +ALTER TABLE public.issue ENABLE ALWAYS TRIGGER es_insert_issue; + +CREATE OR REPLACE FUNCTION public.udpate_issue_comment() RETURNS trigger AS +$def$ + BEGIN + if (TG_OP = 'DELETE') then + update public.issue_es SET comment=(select array_to_string(array_agg(content order by created_unix desc),',') from public.comment where issue_id=OLD.issue_id) where 
id=OLD.issue_id; + elsif (TG_OP = 'UPDATE') then + update public.issue_es SET comment=(select array_to_string(array_agg(content order by created_unix desc),',') from public.comment where issue_id=NEW.issue_id) where id=NEW.issue_id; + end if; + + return null; + END; +$def$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS es_udpate_issue_comment on public.comment; +CREATE TRIGGER es_udpate_issue_comment + AFTER DELETE OR UPDATE ON public.comment + FOR EACH ROW EXECUTE PROCEDURE udpate_issue_comment(); + +ALTER TABLE public.comment ENABLE ALWAYS TRIGGER es_udpate_issue_comment; + + +CREATE OR REPLACE FUNCTION public.update_issue() RETURNS trigger AS +$def$ + declare + BEGIN + UPDATE public.issue_es + SET content=NEW.content, + name=NEW.name, + is_closed=NEW.is_closed, + num_comments=NEW.num_comments, + updated_unix=NEW.updated_unix, + comment=(select array_to_string(array_agg(content order by created_unix desc),',') from public.comment where issue_id=NEW.id) + where id=NEW.id; + return new; + END +$def$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS es_update_issue on public.issue; + +CREATE TRIGGER es_update_issue + AFTER UPDATE ON public.issue + FOR EACH ROW EXECUTE PROCEDURE update_issue(); + +ALTER TABLE public.issue ENABLE ALWAYS TRIGGER es_update_issue; + +CREATE OR REPLACE FUNCTION public.delete_issue() RETURNS trigger AS +$def$ + declare + BEGIN + DELETE FROM public.issue_es where id=OLD.id; + return new; + END +$def$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS es_delete_issue on public.issue; +CREATE TRIGGER es_delete_issue + AFTER DELETE ON public.issue + FOR EACH ROW EXECUTE PROCEDURE delete_issue(); + +ALTER TABLE public.issue ENABLE ALWAYS TRIGGER es_delete_issue; \ No newline at end of file diff --git a/models/dbsql/repo_foreigntable_for_es.sql b/models/dbsql/repo_foreigntable_for_es.sql new file mode 100644 index 000000000..7e06fd99e --- /dev/null +++ b/models/dbsql/repo_foreigntable_for_es.sql @@ -0,0 +1,545 @@ +-- 要处理项目从私有变为公有,并且从公有变成私有的情况 +DELETE FROM public.repository_es; +DROP FOREIGN table if exists public.repository_es; +DROP TRIGGER IF EXISTS es_insert_repository on public.repository; +DROP FUNCTION public.insert_repository_data; +DROP TRIGGER IF EXISTS es_update_repository on public.repository; +DROP FUNCTION public.update_repository; + +DROP TRIGGER IF EXISTS es_delete_repository on public.repository; +DROP FUNCTION public.delete_repository; + +DROP TRIGGER IF EXISTS es_udpate_repository_lang on public.language_stat; +DROP FUNCTION public.udpate_repository_lang; + + +CREATE FOREIGN TABLE public.repository_es ( + id bigint NOT NULL, + owner_id bigint, + owner_name character varying(255), + lower_name character varying(255) NOT NULL, + name character varying(255) NOT NULL, + description text, + website character varying(2048), + original_service_type integer, + original_url character varying(2048), + default_branch character varying(255), + num_watches integer, + num_stars integer, + num_forks integer, + num_issues integer, + num_closed_issues integer, + num_pulls integer, + num_closed_pulls integer, + num_milestones integer DEFAULT 0 NOT NULL, + num_closed_milestones integer DEFAULT 0 NOT NULL, + is_private boolean, + is_empty boolean, + is_archived boolean, + is_mirror boolean, + status integer DEFAULT 0 NOT NULL, + is_fork boolean DEFAULT false NOT NULL, + fork_id bigint, + is_template boolean DEFAULT false NOT NULL, + template_id bigint, + size bigint DEFAULT 0 NOT NULL, + is_fsck_enabled boolean DEFAULT true NOT NULL, + close_issues_via_commit_in_any_branch boolean 
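-- (The Chinese comment heading this file says: handle repositories that switch
-- from private to public, and from public to private.)
-- Illustrative spot check for that visibility handling, implemented by the
-- update_repository trigger further down: once repository 123 (a placeholder id)
-- turns private, its rows should vanish from all three ES tables.
-- SELECT (SELECT count(*) FROM public.repository_es WHERE id = 123)      AS repo_rows,
--        (SELECT count(*) FROM public.dataset_es    WHERE repo_id = 123) AS dataset_rows,
--        (SELECT count(*) FROM public.issue_es      WHERE repo_id = 123) AS issue_rows;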
DEFAULT false NOT NULL, + topics text, + avatar character varying(64), + created_unix bigint, + updated_unix bigint, + contract_address character varying(255), + block_chain_status integer DEFAULT 0 NOT NULL, + balance character varying(255) DEFAULT '0'::character varying NOT NULL, + clone_cnt bigint DEFAULT 0 NOT NULL, + license character varying(100), + download_cnt bigint DEFAULT 0 NOT NULL, + num_commit bigint DEFAULT 0 NOT NULL, + git_clone_cnt bigint DEFAULT 0 NOT NULL, + creator_id bigint NOT NULL DEFAULT 0, + repo_type integer NOT NULL DEFAULT 0, + lang character varying(2048), + alias character varying(255), + lower_alias character varying(255) +) SERVER multicorn_es +OPTIONS + ( + host '192.168.207.94', + port '9200', + index 'repository-es-index', + rowid_column 'id', + default_sort '_id' + ) +; +delete from public.repository_es; + INSERT INTO public.repository_es (id, + owner_id, + owner_name, + lower_name, + name, + description, + website, + original_service_type, + original_url, + default_branch, + num_watches, + num_stars, + num_forks, + num_issues, + num_closed_issues, + num_pulls, + num_closed_pulls, + num_milestones, + num_closed_milestones, + is_private, + is_empty, + is_archived, + is_mirror, + status, + is_fork, + fork_id, + is_template, + template_id, + size, + is_fsck_enabled, + close_issues_via_commit_in_any_branch, + topics, + avatar, + created_unix, + updated_unix, + contract_address, + block_chain_status, + balance, + clone_cnt, + num_commit, + git_clone_cnt, + creator_id, + repo_type, + lang, + alias, + lower_alias + ) + SELECT + id, + owner_id, + owner_name, + lower_name, + name, + description, + website, + original_service_type, + original_url, + default_branch, + num_watches, + num_stars, + num_forks, + num_issues, + num_closed_issues, + num_pulls, + num_closed_pulls, + num_milestones, + num_closed_milestones, + is_private, + is_empty, + is_archived, + is_mirror, + status, + is_fork, + fork_id, + is_template, + template_id, + size, + is_fsck_enabled, + close_issues_via_commit_in_any_branch, + topics, + avatar, + created_unix, + updated_unix, + contract_address, + block_chain_status, + balance, + clone_cnt, + num_commit, + git_clone_cnt, + creator_id, + repo_type, + (select array_to_string(array_agg(language order by percentage desc),',') from public.language_stat a where a.repo_id=b.id), + alias, + lower_alias + FROM public.repository b where b.is_private=false; + +DROP TRIGGER IF EXISTS es_insert_repository on public.repository; + +CREATE OR REPLACE FUNCTION public.insert_repository_data() RETURNS trigger AS +$def$ + BEGIN + if not NEW.is_private then + INSERT INTO public.repository_es (id, + owner_id, + owner_name, + lower_name, + name, + description, + website, + original_service_type, + original_url, + default_branch, + num_watches, + num_stars, + num_forks, + num_issues, + num_closed_issues, + num_pulls, + num_closed_pulls, + num_milestones, + num_closed_milestones, + is_private, + is_empty, + is_archived, + is_mirror, + status, + is_fork, + fork_id, + is_template, + template_id, + size, + is_fsck_enabled, + close_issues_via_commit_in_any_branch, + topics, + avatar, + created_unix, + updated_unix, + contract_address, + block_chain_status, + balance, + clone_cnt, + num_commit, + git_clone_cnt, + creator_id, + repo_type, + alias, + lower_alias) VALUES + (NEW.id, + NEW.owner_id, + NEW.owner_name, + NEW.lower_name, + NEW.name, + NEW.description, + NEW.website, + NEW.original_service_type, + NEW.original_url, + NEW.default_branch, + NEW.num_watches, + 
NEW.num_stars, + NEW.num_forks, + NEW.num_issues, + NEW.num_closed_issues, + NEW.num_pulls, + NEW.num_closed_pulls, + NEW.num_milestones, + NEW.num_closed_milestones, + NEW.is_private, + NEW.is_empty, + NEW.is_archived, + NEW.is_mirror, + NEW.status, + NEW.is_fork, + NEW.fork_id, + NEW.is_template, + NEW.template_id, + NEW.size, + NEW.is_fsck_enabled, + NEW.close_issues_via_commit_in_any_branch, + NEW.topics, + NEW.avatar, + NEW.created_unix, + NEW.updated_unix, + NEW.contract_address, + NEW.block_chain_status, + NEW.balance, + NEW.clone_cnt, + NEW.num_commit, + NEW.git_clone_cnt, + NEW.creator_id, + NEW.repo_type, + NEW.alias, + NEW.lower_alias); + end if; + RETURN NEW; + END; +$def$ +LANGUAGE plpgsql; + + +CREATE TRIGGER es_insert_repository + AFTER INSERT ON public.repository + FOR EACH ROW EXECUTE PROCEDURE insert_repository_data(); + +ALTER TABLE public.repository ENABLE ALWAYS TRIGGER es_insert_repository; + +DROP TRIGGER IF EXISTS es_update_repository on public.repository; + +CREATE OR REPLACE FUNCTION public.update_repository() RETURNS trigger AS +$def$ + BEGIN + if OLD.is_private != NEW.is_private then + if OLD.is_private and not NEW.is_private then + --insert + INSERT INTO public.repository_es (id, + owner_id, + owner_name, + lower_name, + name, + description, + website, + original_service_type, + original_url, + default_branch, + num_watches, + num_stars, + num_forks, + num_issues, + num_closed_issues, + num_pulls, + num_closed_pulls, + num_milestones, + num_closed_milestones, + is_private, + is_empty, + is_archived, + is_mirror, + status, + is_fork, + fork_id, + is_template, + template_id, + size, + is_fsck_enabled, + close_issues_via_commit_in_any_branch, + topics, + avatar, + created_unix, + updated_unix, + contract_address, + block_chain_status, + balance, + clone_cnt, + num_commit, + git_clone_cnt, + creator_id, + repo_type, + lang, + alias, + lower_alias) + SELECT + id, + owner_id, + owner_name, + lower_name, + name, + description, + website, + original_service_type, + original_url, + default_branch, + num_watches, + num_stars, + num_forks, + num_issues, + num_closed_issues, + num_pulls, + num_closed_pulls, + num_milestones, + num_closed_milestones, + is_private, + is_empty, + is_archived, + is_mirror, + status, + is_fork, + fork_id, + is_template, + template_id, + size, + is_fsck_enabled, + close_issues_via_commit_in_any_branch, + topics, + avatar, + created_unix, + updated_unix, + contract_address, + block_chain_status, + balance, + clone_cnt, + num_commit, + git_clone_cnt, + creator_id, + repo_type, + (select array_to_string(array_agg(language order by percentage desc),',') from public.language_stat a where a.repo_id=b.id), + alias, + lower_alias + FROM public.repository b where b.id=NEW.id; + INSERT INTO public.dataset_es( + id, + title, + status, + category, + description, + download_times, + license, task, + release_id, + user_id, + repo_id, + created_unix, + updated_unix,file_name) + SELECT + b.id, + b.title, + b.status, + b.category, + b.description, + b.download_times, + b.license, + b.task, + b.release_id, + b.user_id, + b.repo_id, + b.created_unix, + b.updated_unix,(select array_to_string(array_agg(name order by created_unix desc),',') from public.attachment a where a.dataset_id=b.id and a.is_private=false) + FROM public.dataset b where b.repo_id=NEW.id; + + INSERT INTO public.issue_es( + id, + repo_id, + index, + poster_id, + original_author, + original_author_id, + name, + content, + milestone_id, + priority, + is_closed, + is_pull, + num_comments, + ref, + 
deadline_unix, + created_unix, + updated_unix, + closed_unix, + is_locked, + amount, + is_transformed,comment,pr_id) + SELECT + b.id, + b.repo_id, + b.index, + b.poster_id, + b.original_author, + b.original_author_id, + b.name, + b.content, + b.milestone_id, + b.priority, + b.is_closed, + b.is_pull, + b.num_comments, + b.ref, + b.deadline_unix, + b.created_unix, + b.updated_unix, + b.closed_unix, + b.is_locked, + b.amount, + b.is_transformed, + (select array_to_string(array_agg(content order by created_unix desc),',') from public.comment a where a.issue_id=b.id), + (select id from public.pull_request d where d.issue_id=b.id) + FROM public.issue b where b.repo_id=NEW.id; + + end if; + + if not OLD.is_private and NEW.is_private then + delete from public.issue_es where repo_id=NEW.id; + delete from public.dataset_es where repo_id=NEW.id; + delete from public.repository_es where id=NEW.id; + end if; + + end if; + + if not NEW.is_private then + raise notice 'update repo,the updated_unix is %',NEW.updated_unix; + update public.repository_es SET description=NEW.description, + name=NEW.name, + lower_name=NEW.lower_name, + owner_name=NEW.owner_name, + website=NEW.website, + updated_unix=NEW.updated_unix, + num_watches=NEW.num_watches, + num_stars=NEW.num_stars, + num_forks=NEW.num_forks, + topics=NEW.topics, + alias = NEW.alias, + lower_alias = NEW.lower_alias, + avatar=NEW.avatar + where id=NEW.id; + end if; + return new; + END +$def$ +LANGUAGE plpgsql; + +CREATE TRIGGER es_update_repository + AFTER UPDATE ON public.repository + FOR EACH ROW EXECUTE PROCEDURE update_repository(); + +ALTER TABLE public.repository ENABLE ALWAYS TRIGGER es_update_repository; + + +DROP TRIGGER IF EXISTS es_delete_repository on public.repository; + +CREATE OR REPLACE FUNCTION public.delete_repository() RETURNS trigger AS +$def$ + declare + BEGIN + delete from public.issue_es where repo_id=OLD.id; + delete from public.dataset_es where repo_id=OLD.id; + DELETE FROM public.repository_es where id=OLD.id; + return new; + END +$def$ +LANGUAGE plpgsql; + + +CREATE TRIGGER es_delete_repository + AFTER DELETE ON public.repository + FOR EACH ROW EXECUTE PROCEDURE delete_repository(); + +ALTER TABLE public.repository ENABLE ALWAYS TRIGGER es_delete_repository; + + + +DROP TRIGGER IF EXISTS es_udpate_repository_lang on public.language_stat; + +CREATE OR REPLACE FUNCTION public.udpate_repository_lang() RETURNS trigger AS +$def$ + BEGIN + if (TG_OP = 'UPDATE') then + update public.repository_es SET lang=(select array_to_string(array_agg(language order by percentage desc),',') from public.language_stat where repo_id=NEW.repo_id) where id=NEW.repo_id; + elsif (TG_OP = 'INSERT') then + update public.repository_es SET lang=(select array_to_string(array_agg(language order by percentage desc),',') from public.language_stat where repo_id=NEW.repo_id) where id=NEW.repo_id; + elsif (TG_OP = 'DELETE') then + if exists(select 1 from public.repository where id=OLD.repo_id) then + update public.repository_es SET lang=(select array_to_string(array_agg(language order by percentage desc),',') from public.language_stat where repo_id=OLD.repo_id) where id=OLD.repo_id; + end if; + end if; + return null; + END; +$def$ +LANGUAGE plpgsql; + +CREATE TRIGGER es_udpate_repository_lang + AFTER INSERT OR UPDATE OR DELETE ON public.language_stat + FOR EACH ROW EXECUTE PROCEDURE udpate_repository_lang(); + +ALTER TABLE public.language_stat ENABLE ALWAYS TRIGGER es_udpate_repository_lang; \ No newline at end of file diff --git 
a/models/dbsql/user_foreigntable_for_es.sql b/models/dbsql/user_foreigntable_for_es.sql new file mode 100644 index 000000000..5d77757f0 --- /dev/null +++ b/models/dbsql/user_foreigntable_for_es.sql @@ -0,0 +1,317 @@ +DELETE FROM public.user_es; +DROP FOREIGN table if exists public.user_es; +DROP TRIGGER IF EXISTS es_insert_user on public.user; +DROP FUNCTION public.insert_user_data; +DROP TRIGGER IF EXISTS es_update_user on public.user; +DROP FUNCTION public.update_user; + +DROP TRIGGER IF EXISTS es_delete_user on public.user; +DROP FUNCTION public.delete_user; + +CREATE FOREIGN TABLE public.user_es +( + id bigint NOT NULL , + lower_name character varying(255) NULL, + name character varying(255) NULL, + full_name character varying(255), + email character varying(255), + keep_email_private boolean, + email_notifications_preference character varying(20) , + passwd character varying(255) , + passwd_hash_algo character varying(255) , + must_change_password boolean NOT NULL DEFAULT false, + login_type integer, + login_source bigint NOT NULL DEFAULT 0, + login_name character varying(255) , + type integer, + location character varying(255), + website character varying(255), + rands character varying(10), + salt character varying(10), + language character varying(5), + description character varying(255), + created_unix bigint, + updated_unix bigint, + last_login_unix bigint, + last_repo_visibility boolean, + max_repo_creation integer, + is_active boolean, + is_admin boolean, + is_restricted boolean NOT NULL DEFAULT false, + allow_git_hook boolean, + allow_import_local boolean, + allow_create_organization boolean DEFAULT true, + prohibit_login boolean NOT NULL DEFAULT false, + avatar character varying(2048) , + avatar_email character varying(255), + use_custom_avatar boolean, + num_followers integer, + num_following integer NOT NULL DEFAULT 0, + num_stars integer, + num_repos integer, + num_teams integer, + num_members integer, + visibility integer NOT NULL DEFAULT 0, + repo_admin_change_team_access boolean NOT NULL DEFAULT false, + diff_view_style character varying(255), + theme character varying(255), + token character varying(1024) , + public_key character varying(255), + private_key character varying(255), + is_operator boolean NOT NULL DEFAULT false, + num_dataset_stars integer NOT NULL DEFAULT 0 +) SERVER multicorn_es +OPTIONS + ( + host '192.168.207.94', + port '9200', + index 'user-es-index', + rowid_column 'id', + default_sort '_id' + ) +; +delete from public.user_es; + INSERT INTO public.user_es( + id, + lower_name, + name, + full_name, + email, + keep_email_private, + email_notifications_preference, + must_change_password, + login_type, + login_source, + login_name, + type, + location, + website, + rands, + language, + description, + created_unix, + updated_unix, + last_login_unix, + last_repo_visibility, + max_repo_creation, + is_active, + is_restricted, + allow_git_hook, + allow_import_local, + allow_create_organization, + prohibit_login, + avatar, + avatar_email, + use_custom_avatar, + num_followers, + num_following, + num_stars, + num_repos, + num_teams, + num_members, + visibility, + repo_admin_change_team_access, + diff_view_style, + theme, + is_operator, + num_dataset_stars) + SELECT + id, + lower_name, + name, + full_name, + email, + keep_email_private, + email_notifications_preference, + must_change_password, + login_type, + login_source, + login_name, + type, + location, + website, + rands, + language, + description, + created_unix, + updated_unix, + last_login_unix, + 
last_repo_visibility, + max_repo_creation, + is_active, + is_restricted, + allow_git_hook, + allow_import_local, + allow_create_organization, + prohibit_login, + avatar, + avatar_email, + use_custom_avatar, + num_followers, + num_following, + num_stars, + num_repos, + num_teams, + num_members, + visibility, + repo_admin_change_team_access, + diff_view_style, + theme, + is_operator, + num_dataset_stars + FROM public.user; + +DROP TRIGGER IF EXISTS es_insert_user on public.user; + +CREATE OR REPLACE FUNCTION public.insert_user_data() RETURNS trigger AS +$def$ + BEGIN + INSERT INTO public."user_es"( + id, + lower_name, + name, + full_name, + email, + keep_email_private, + email_notifications_preference, + must_change_password, + login_type, + login_source, + login_name, + type, + location, + website, + rands, + language, + description, + created_unix, + updated_unix, + last_login_unix, + last_repo_visibility, + max_repo_creation, + is_active, + is_restricted, + allow_git_hook, + allow_import_local, + allow_create_organization, + prohibit_login, + avatar, + avatar_email, + use_custom_avatar, + num_followers, + num_following, + num_stars, + num_repos, + num_teams, + num_members, + visibility, + repo_admin_change_team_access, + diff_view_style, + theme, + is_operator, + num_dataset_stars) + VALUES ( + NEW.id, + NEW.lower_name, + NEW.name, + NEW.full_name, + NEW.email, + NEW.keep_email_private, + NEW.email_notifications_preference, + NEW.must_change_password, + NEW.login_type, + NEW.login_source, + NEW.login_name, + NEW.type, + NEW.location, + NEW.website, + NEW.rands, + NEW.language, + NEW.description, + NEW.created_unix, + NEW.updated_unix, + NEW.last_login_unix, + NEW.last_repo_visibility, + NEW.max_repo_creation, + NEW.is_active, + NEW.is_restricted, + NEW.allow_git_hook, + NEW.allow_import_local, + NEW.allow_create_organization, + NEW.prohibit_login, + NEW.avatar, + NEW.avatar_email, + NEW.use_custom_avatar, + NEW.num_followers, + NEW.num_following, + NEW.num_stars, + NEW.num_repos, + NEW.num_teams, + NEW.num_members, + NEW.visibility, + NEW.repo_admin_change_team_access, + NEW.diff_view_style, + NEW.theme, + NEW.is_operator, + NEW.num_dataset_stars + ); + + RETURN NEW; + END; +$def$ +LANGUAGE plpgsql; + + + +CREATE TRIGGER es_insert_user + AFTER INSERT ON public.user + FOR EACH ROW EXECUTE PROCEDURE insert_user_data(); + +ALTER TABLE public.user ENABLE ALWAYS TRIGGER es_insert_user; + +DROP TRIGGER IF EXISTS es_update_user on public.user; + +CREATE OR REPLACE FUNCTION public.update_user() RETURNS trigger AS +$def$ + BEGIN + UPDATE public.user_es + SET description=NEW.description, + name=NEW.name, + full_name=NEW.full_name, + location=NEW.location, + website=NEW.website, + email=NEW.email, + num_dataset_stars=NEW.num_dataset_stars, + updated_unix=NEW.updated_unix + where id=NEW.id; + return new; + END +$def$ +LANGUAGE plpgsql; + + + +CREATE TRIGGER es_update_user + AFTER UPDATE ON public.user + FOR EACH ROW EXECUTE PROCEDURE update_user(); + +ALTER TABLE public.user ENABLE ALWAYS TRIGGER es_update_user; + +DROP TRIGGER IF EXISTS es_delete_user on public.user; + +CREATE OR REPLACE FUNCTION public.delete_user() RETURNS trigger AS +$def$ + declare + BEGIN + DELETE FROM public.user_es where id=OLD.id; + return new; + END +$def$ +LANGUAGE plpgsql; + + +CREATE TRIGGER es_delete_user + AFTER DELETE ON public.user + FOR EACH ROW EXECUTE PROCEDURE delete_user(); + +ALTER TABLE public.user ENABLE ALWAYS TRIGGER es_delete_user; \ No newline at end of file diff --git a/models/file_chunk.go 
b/models/file_chunk.go index 76c926dc5..0fc3a8879 100755
--- a/models/file_chunk.go
+++ b/models/file_chunk.go
@@ -13,11 +13,6 @@ const (
 	FileUploaded
 )
 
-const (
-	TypeCloudBrainOne int = iota
-	TypeCloudBrainTwo
-)
-
 type FileChunk struct {
 	ID   int64  `xorm:"pk autoincr"`
 	UUID string `xorm:"uuid UNIQUE"`
diff --git a/models/models.go b/models/models.go
index 36527f78d..2ec61941d 100755
--- a/models/models.go
+++ b/models/models.go
@@ -138,6 +138,8 @@ func init() {
 		new(OfficialTag),
 		new(OfficialTagRepos),
 		new(WechatBindLog),
+		new(OrgStatistic),
+		new(SearchRecord),
 	)
 
 	tablesStatistic = append(tablesStatistic,
@@ -152,6 +154,8 @@ func init() {
 		new(UserBusinessAnalysisCurrentWeek),
 		new(UserBusinessAnalysisYesterday),
 		new(UserLoginLog),
+		new(UserMetrics),
+		new(UserAnalysisPara),
 	)
 
 	gonicNames := []string{"SSL", "UID"}
diff --git a/models/org.go b/models/org.go
index 85fb157ae..8b3e60ef8 100755
--- a/models/org.go
+++ b/models/org.go
@@ -8,6 +8,7 @@ package models
 import (
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 
 	"code.gitea.io/gitea/modules/log"
@@ -19,6 +20,17 @@ import (
 	"xorm.io/xorm"
 )
 
+type OrgStatistic struct {
+	ID       int64 `xorm:"pk autoincr"`
+	OrgID    int64 `xorm:"UNIQUE"`
+	NumScore int   `xorm:"INDEX NOT NULL DEFAULT 0"`
+}
+
+type OrgScore struct {
+	*User
+	Score string
+}
+
 // IsOwnedBy returns true if given user is in the owner team.
 func (org *User) IsOwnedBy(uid int64) (bool, error) {
 	return IsOrganizationOwner(org.ID, uid)
@@ -135,6 +147,93 @@ func (org *User) RemoveOrgRepo(repoID int64) error {
 	return org.removeOrgRepo(x, repoID)
 }
 
+func UpdateOrgStatistics() {
+	ids, err := GetOrganizationsId()
+	if err != nil {
+		return
+	}
+	for _, id := range ids {
+		org := User{ID: id}
+		orgStat := &OrgStatistic{OrgID: id}
+		numScore, err := org.getOrgStatistics()
+		if err == nil {
+			has, _ := x.Get(orgStat)
+
+			orgStat.NumScore = numScore
+			if has {
+				// orgStat is already a pointer; passing &orgStat (a **OrgStatistic)
+				// would update nothing.
+				x.ID(orgStat.ID).Cols("num_score").Update(orgStat)
+			} else {
+				x.Insert(orgStat)
+			}
+		}
+	}
+}
+
+func (org *User) getOrgStatistics() (int, error) {
+	count, err := getRepositoryCount(x, org)
+	if err != nil {
+		return 0, err
+	}
+
+	err = org.GetRepositories(ListOptions{int(count), 1})
+	if err != nil {
+		return 0, err
+	}
+	var numScore = 0
+	for _, repo := range org.Repos {
+		numScore += int(getOpenIByRepoId(repo.ID))
+	}
+	return numScore, nil
+}
+
+func FindTopNStarsOrgs(n int) ([]*OrgScore, error) {
+	sql := "select a.id,sum(b.num_stars) score from \"user\" a ,repository b where a.id=b.owner_id and a.type=1 group by a.id order by score desc limit " + strconv.Itoa(n)
+
+	return findTopNOrgs(sql)
+}
+
+func FindTopNMembersOrgs(n int) ([]*OrgScore, error) {
+	sql := "select id, count(user_id) score from" +
+		" (select org_id as id, uid as user_id from org_user " +
+		"union select a.id,b.user_id from \"user\" a,collaboration b,repository c " +
+		"where a.type=1 and a.id=c.owner_id and b.repo_id=c.id) d " +
+		"group by id order by score desc limit " + strconv.Itoa(n)
+
+	return findTopNOrgs(sql)
+}
+
+func FindTopNOpenIOrgs(n int) ([]*OrgScore, error) {
+	sql := "select org_id id,num_score score from org_statistic order by num_score desc limit " + strconv.Itoa(n)
+
+	return findTopNOrgs(sql)
+}
+
+func findTopNOrgs(sql string) ([]*OrgScore, error) {
+	results, err := x.QueryString(sql)
+	if err != nil {
+		return nil, err
+	}
+	var orgScore []*OrgScore
+	for _, record := range results {
+		id, _ := strconv.ParseInt(record["id"], 10, 64)
+		user, err := getUserByID(x, id)
+		if err != nil {
+			continue
+		}
+		orgScore = append(orgScore,
&OrgScore{user, record["score"]}) + + } + + return orgScore, nil + +} + // CreateOrganization creates record of a new organization. func CreateOrganization(org, owner *User) (err error) { if !owner.CanCreateOrganization() { diff --git a/models/repo.go b/models/repo.go index 42e350fbe..25bfb4a74 100755 --- a/models/repo.go +++ b/models/repo.go @@ -6,13 +6,14 @@ package models import ( - "code.gitea.io/gitea/modules/git" "context" "crypto/md5" "errors" "fmt" "html/template" "math/rand" + + "code.gitea.io/gitea/modules/git" "xorm.io/xorm" "code.gitea.io/gitea/modules/blockchain" @@ -1606,14 +1607,16 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e } dataset, err := GetDatasetByRepo(repo) - if err != nil { + if err != nil && !IsErrNotExist(err) { return err } - _, err = e.Where("dataset_id = ?", dataset.ID).Cols("is_private").Update(&Attachment{ - IsPrivate: true, - }) - if err != nil { - return err + if dataset != nil { + _, err = e.Where("dataset_id = ?", dataset.ID).Cols("is_private").Update(&Attachment{ + IsPrivate: true, + }) + if err != nil { + return err + } } } else { diff --git a/models/repo_list.go b/models/repo_list.go index 6fb9380de..5bf0ecf03 100755 --- a/models/repo_list.go +++ b/models/repo_list.go @@ -190,7 +190,8 @@ type SearchRepoOptions struct { // None -> include all repos // True -> include just courses // False -> include just no courses - Course util.OptionalBool + Course util.OptionalBool + OnlySearchPrivate bool } //SearchOrderBy is used to sort the result @@ -219,12 +220,15 @@ const ( SearchOrderByDownloadTimes SearchOrderBy = "download_times DESC" SearchOrderByHot SearchOrderBy = "(num_watches + num_stars + num_forks + clone_cnt) DESC" SearchOrderByActive SearchOrderBy = "(num_issues + num_pulls + num_commit) DESC" + SearchOrderByWatches SearchOrderBy = "num_watches DESC" ) // SearchRepositoryCondition creates a query condition according search repository options func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond { var cond = builder.NewCond() - + if opts.OnlySearchPrivate { + cond = cond.And(builder.Eq{"is_private": true}) + } if opts.Private { if opts.Actor != nil && !opts.Actor.IsAdmin && opts.Actor.ID != opts.OwnerID { // OK we're in the context of a User @@ -337,7 +341,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond { if !opts.TopicOnly { var likes = builder.NewCond() for _, v := range strings.Split(opts.Keyword, ",") { - likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)}) + likes = likes.Or(builder.Like{"lower_alias", strings.ToLower(v)}) likes = likes.Or(builder.Like{"alias", v}) if opts.IncludeDescription { likes = likes.Or(builder.Like{"LOWER(description)", strings.ToLower(v)}) diff --git a/models/repo_statistic.go b/models/repo_statistic.go index a9e9593af..4f8f13ed7 100755 --- a/models/repo_statistic.go +++ b/models/repo_statistic.go @@ -73,6 +73,16 @@ func (repo *RepoStatistic) DisplayName() string { return repo.Alias } +func getOpenIByRepoId(repoId int64) float64 { + repoStatistic := new(RepoStatistic) + has, err := xStatistic.Cols("radar_total").Where("repo_id=?", repoId).Desc("id").Limit(1).Get(repoStatistic) + if !has || err != nil { + return 0 + } + return repoStatistic.RadarTotal + +} + func DeleteRepoStatDaily(date string) error { sess := xStatistic.NewSession() defer sess.Close() diff --git a/models/search_record.go b/models/search_record.go new file mode 100644 index 000000000..d9d85a591 --- /dev/null +++ b/models/search_record.go @@ -0,0 +1,83 @@ 
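// The new models/search_record.go below persists every search keyword and
// backs user-scoped searches over private repositories: issues/PRs and
// datasets belonging to the signed-in user.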
+package models
+
+import (
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/timeutil"
+
+	"xorm.io/xorm"
+)
+
+type SearchRecord struct {
+	ID int64 `xorm:"pk autoincr"`
+	// the keyword the user searched for
+	Keyword string `xorm:"NOT NULL"`
+	// when the search was made
+	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func SaveSearchKeywordToDb(keyword string) error {
+	record := &SearchRecord{
+		Keyword: keyword,
+	}
+	sess := x.NewSession()
+	defer sess.Close()
+	_, err := sess.Insert(record)
+	if err != nil {
+		log.Error("insert search record failed: " + err.Error())
+		return err
+	}
+	return nil
+}
+
+func setIssueQueryCondition(sess *xorm.Session, keyword string, isPull bool, userId int64) {
+	sess.And("issue.poster_id=?", userId)
+	sess.And("issue.is_pull=?", isPull)
+	// Bind the keyword as a query parameter; concatenating it into the SQL
+	// string would be an injection hole.
+	sess.And("(issue.name like ? or issue.content like ?)", "%"+keyword+"%", "%"+keyword+"%")
+	sess.Join("INNER", "repository", "issue.repo_id = repository.id").And("repository.is_private = ?", true)
+}
+
+func SearchPrivateIssueOrPr(page int, pageSize int, keyword string, isPull bool, userId int64) ([]*Issue, int64, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	setIssueQueryCondition(sess, keyword, isPull, userId)
+	count, err := sess.Count(new(Issue))
+	if err != nil {
+		return nil, 0, err
+	}
+
+	setIssueQueryCondition(sess, keyword, isPull, userId)
+	sess.Desc("issue.created_unix")
+	sess.Limit(pageSize, (page-1)*pageSize)
+	issues := make([]*Issue, 0)
+	if err := sess.Find(&issues); err != nil {
+		return nil, 0, err
+	}
+	return issues, count, nil
+}
+
+func setDataSetQueryCondition(sess *xorm.Session, keyword string, userId int64) {
+	sess.And("dataset.user_id=?", userId)
+	sess.And("(dataset.title like ? or dataset.description like ?)", "%"+keyword+"%", "%"+keyword+"%")
+	sess.Join("INNER", "repository", "dataset.repo_id = repository.id").And("repository.is_private = ?", true)
+}
+
+func SearchDatasetBySQL(page int, pageSize int, keyword string, userId int64) ([]*Dataset, int64, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	setDataSetQueryCondition(sess, keyword, userId)
+	count, err := sess.Count(new(Dataset))
+	if err != nil {
+		return nil, 0, err
+	}
+
+	setDataSetQueryCondition(sess, keyword, userId)
+	sess.Desc("dataset.created_unix")
+	sess.Limit(pageSize, (page-1)*pageSize)
+	datasets := make([]*Dataset, 0)
+	if err := sess.Find(&datasets); err != nil {
+		return nil, 0, err
+	}
+	return datasets, count, nil
+}
diff --git a/models/user.go b/models/user.go
index f72462051..71885aeb1 100755
--- a/models/user.go
+++ b/models/user.go
@@ -2104,6 +2104,12 @@ func GetOrganizationsCount() (int64, error) {
 }
 
+func GetOrganizationsId() ([]int64, error) {
+	var ids []int64
+	err := x.Table("user").Where("type=1").Cols("id").Find(&ids)
+	return ids, err
+}
+
 func GetBlockChainUnSuccessUsers() ([]*User, error) {
 	users := make([]*User, 0, 10)
 	err := x.Where("public_key = ''").
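// Hedged caller-side sketch for the helpers added in models/search_record.go
// above (userID stands for whatever signed-in user id the caller holds; page
// numbering starts at 1, as the offset arithmetic shows):
//
//	issues, total, err := SearchPrivateIssueOrPr(1, 20, "cuda", false, userID)
//	if err == nil {
//		log.Info("matched %d private issues; first page holds %d", total, len(issues))
//	}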
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go index 288762161..65ce642d5 100644 --- a/models/user_business_analysis.go +++ b/models/user_business_analysis.go @@ -6,7 +6,6 @@ import ( "strconv" "time" - "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" @@ -81,6 +80,19 @@ type UserBusinessAnalysisAll struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + //cloudbraintask + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysis struct { @@ -146,6 +158,18 @@ type UserBusinessAnalysis struct { Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisQueryOptions struct { @@ -183,6 +207,29 @@ func getLastCountDate() int64 { return pageStartTime.Unix() } +func QueryMetrics(start int64, end int64) ([]*UserMetrics, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + userMetricsList := make([]*UserMetrics, 0) + if err := statictisSess.Table(new(UserMetrics)).Where("count_date >" + fmt.Sprint(start) + " and count_date<" + fmt.Sprint(end)).OrderBy("count_date desc"). + Find(&userMetricsList); err != nil { + return nil, 0 + } + return userMetricsList, int64(len(userMetricsList)) +} + +func QueryRankList(key string, tableName string, limit int) ([]*UserBusinessAnalysisAll, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) + if err := statictisSess.Table(tableName).OrderBy(key+" desc,id desc").Limit(limit, 0). 
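// Illustrative call of the ranking query above (illustrative only; assumes
// xorm maps UserBusinessAnalysisAll to table "user_business_analysis_all"):
//
//	top10, _ := QueryRankList("user_index", "user_business_analysis_all", 10)
//
// which returns at most 10 rows ordered by user_index desc, ties broken by id desc.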
+ Find(&userBusinessAnalysisAllList); err != nil { + return nil, 0 + } + return userBusinessAnalysisAllList, int64(len(userBusinessAnalysisAllList)) +} + func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string) ([]*UserBusinessAnalysisAll, int64) { statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -334,6 +381,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus resultMap[userRecord.ID].WatchedCount += userRecord.WatchedCount resultMap[userRecord.ID].CommitCodeSize += userRecord.CommitCodeSize resultMap[userRecord.ID].CommitDatasetSize += userRecord.CommitDatasetSize + resultMap[userRecord.ID].CommitDatasetNum += userRecord.CommitDatasetNum resultMap[userRecord.ID].CommitModelCount += userRecord.CommitModelCount resultMap[userRecord.ID].SolveIssueCount += userRecord.SolveIssueCount resultMap[userRecord.ID].EncyclopediasCount += userRecord.EncyclopediasCount @@ -353,7 +401,7 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus return userBusinessAnalysisReturnList, count } -func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats, tableName string, pageStartTime time.Time, pageEndTime time.Time) { +func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageStartTime time.Time, pageEndTime time.Time, userMetrics map[string]int) { sess := x.NewSession() defer sess.Close() @@ -379,14 +427,15 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s FocusRepoCountMap := queryWatch(start_unix, end_unix) StarRepoCountMap := queryStar(start_unix, end_unix) WatchedCountMap := queryFollow(start_unix, end_unix) - - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitCodeSizeMap := queryCommitCodeSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) DataDate := currentTimeNow.Format("2006-01-02") + " 00:01" cond := "type != 1 and is_active=true" @@ -395,6 +444,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s log.Info("query user error. 
return.") return } + ParaWeight := getParaWeight() var indexTotal int64 indexTotal = 0 insertCount := 0 @@ -412,84 +462,22 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.Name = userRecord.Name dateRecordAll.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecordAll.DataDate = DataDate - - if _, ok := CodeMergeCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CodeMergeCount = 0 - } else { - dateRecordAll.CodeMergeCount = CodeMergeCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitCount = 0 - } else { - dateRecordAll.CommitCount = CommitCountMap[dateRecordAll.ID] - } - - if _, ok := IssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.IssueCount = 0 - } else { - dateRecordAll.IssueCount = IssueCountMap[dateRecordAll.ID] - } - - if _, ok := CommentCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommentCount = 0 - } else { - dateRecordAll.CommentCount = CommentCountMap[dateRecordAll.ID] - } - - if _, ok := FocusRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.FocusRepoCount = 0 - } else { - dateRecordAll.FocusRepoCount = FocusRepoCountMap[dateRecordAll.ID] - } - - if _, ok := StarRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.StarRepoCount = 0 - } else { - dateRecordAll.StarRepoCount = StarRepoCountMap[dateRecordAll.ID] - } - - if _, ok := WatchedCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.WatchedCount = 0 - } else { - dateRecordAll.WatchedCount = WatchedCountMap[dateRecordAll.ID] - } - - if _, ok := CommitCodeSizeMap[dateRecordAll.Email]; !ok { - dateRecordAll.CommitCodeSize = 0 - } else { - dateRecordAll.CommitCodeSize = int(CommitCodeSizeMap[dateRecordAll.Email].CommitLines) - } - - if _, ok := CommitDatasetSizeMap[dateRecordAll.ID]; !ok { - dateRecordAll.CommitDatasetSize = 0 - } else { - dateRecordAll.CommitDatasetSize = CommitDatasetSizeMap[dateRecordAll.ID] - } - - if _, ok := SolveIssueCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.SolveIssueCount = 0 - } else { - dateRecordAll.SolveIssueCount = SolveIssueCountMap[dateRecordAll.ID] - } - - if _, ok := wikiCountMap[dateRecordAll.Name]; !ok { - dateRecordAll.EncyclopediasCount = 0 - } else { - dateRecordAll.EncyclopediasCount = wikiCountMap[dateRecordAll.Name] - } - - if _, ok := CreateRepoCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.CreateRepoCount = 0 - } else { - dateRecordAll.CreateRepoCount = CreateRepoCountMap[dateRecordAll.ID] - } - - if _, ok := LoginCountMap[dateRecordAll.ID]; !ok { - dateRecordAll.LoginCount = 0 - } else { - dateRecordAll.LoginCount = LoginCountMap[dateRecordAll.ID] - } + dateRecordAll.UserLocation = userRecord.Location + + dateRecordAll.CodeMergeCount = getMapValue(dateRecordAll.ID, CodeMergeCountMap) + dateRecordAll.CommitCount = getMapValue(dateRecordAll.ID, CommitCountMap) + dateRecordAll.IssueCount = getMapValue(dateRecordAll.ID, IssueCountMap) + dateRecordAll.CommentCount = getMapValue(dateRecordAll.ID, CommentCountMap) + dateRecordAll.FocusRepoCount = getMapValue(dateRecordAll.ID, FocusRepoCountMap) + dateRecordAll.StarRepoCount = getMapValue(dateRecordAll.ID, StarRepoCountMap) + dateRecordAll.WatchedCount = getMapValue(dateRecordAll.ID, WatchedCountMap) + dateRecordAll.CommitCodeSize = getMapValue(dateRecordAll.ID, CommitCodeSizeMap) + dateRecordAll.CommitDatasetSize = getMapValue(dateRecordAll.ID, CommitDatasetSizeMap) + dateRecordAll.CommitDatasetNum = getMapValue(dateRecordAll.ID, CommitDatasetNumMap) + dateRecordAll.SolveIssueCount = 
getMapValue(dateRecordAll.ID, SolveIssueCountMap) + dateRecordAll.EncyclopediasCount = getMapKeyStringValue(dateRecordAll.Name, wikiCountMap) + dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap) + dateRecordAll.LoginCount = getMapValue(dateRecordAll.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecordAll.ID]; !ok { dateRecordAll.OpenIIndex = 0 @@ -497,7 +485,15 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s dateRecordAll.OpenIIndex = OpenIIndexMap[dateRecordAll.ID] } - dateRecordAll.CommitModelCount = 0 + dateRecordAll.CloudBrainTaskNum = getMapValue(dateRecordAll.ID, CloudBrainTaskMap) + dateRecordAll.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecordAll.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecordAll.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap) + dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight) dateRecordBatch = append(dateRecordBatch, dateRecordAll) if len(dateRecordBatch) >= BATCH_INSERT_SIZE { @@ -508,6 +504,11 @@ func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[s } dateRecordBatch = make([]UserBusinessAnalysisAll, 0) } + if tableName == "user_business_analysis_all" { + if dateRecordAll.UserIndex > 0 || dateRecordAll.LoginCount > 0 { + userMetrics["TotalHasActivityUser"] = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + 1 + } + } } indexTotal += PAGE_SIZE if indexTotal >= count { @@ -529,7 +530,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static insertBatchSql := "INSERT INTO public." 
+ tableName + "(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " + - "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date) " + + "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location) " + "VALUES" for i, record := range dateRecords { @@ -537,7 +538,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static ", " + fmt.Sprint(record.IssueCount) + ", " + fmt.Sprint(record.CommentCount) + ", " + fmt.Sprint(record.FocusRepoCount) + ", " + fmt.Sprint(record.StarRepoCount) + ", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) + ", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) + - ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "')" + ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "')" if i < (len(dateRecords) - 1) { insertBatchSql += "," } @@ -546,36 +547,36 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static statictisSess.Exec(insertBatchSql) } -func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats) { +func RefreshUserStaticAllTabel(wikiCountMap map[string]int, userMetrics map[string]int) { currentTimeNow := time.Now() pageStartTime := time.Date(2021, 11, 5, 0, 0, 0, 0, currentTimeNow.Location()) pageEndTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_all", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_all", pageStartTime, pageEndTime, userMetrics) log.Info("refresh all data finished.") pageStartTime = time.Date(currentTimeNow.Year(), 1, 1, 0, 0, 0, 0, currentTimeNow.Location()) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_year", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_year", pageStartTime, pageEndTime, userMetrics) thisMonth := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 0, 0, 0, 0, currentTimeNow.Location()) - 
refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_month", thisMonth, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_month", thisMonth, pageEndTime, userMetrics) offset := int(time.Monday - currentTimeNow.Weekday()) if offset > 0 { offset = -6 } pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, offset) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_week", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_current_week", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -30) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime, userMetrics) pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -1) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime, userMetrics) pageStartTime = thisMonth.AddDate(0, -1, 0) pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) - refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last_month", pageStartTime, pageEndTime) + refreshUserStaticTable(wikiCountMap, "user_business_analysis_last_month", pageStartTime, pageEndTime, userMetrics) } @@ -613,12 +614,13 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } else { log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) } - CommitDatasetSizeMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) - + CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) + AiModelManageMap := queryUserModel(start_unix, end_unix) statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -628,6 +630,9 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, log.Info("query user error. 
return.") return err } + + ParaWeight := getParaWeight() + userMetrics := make(map[string]int) var indexTotal int64 indexTotal = 0 for { @@ -648,47 +653,14 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.Name = userRecord.Name dateRecord.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) dateRecord.DataDate = DataDate - if _, ok := CodeMergeCountMap[dateRecord.ID]; !ok { - dateRecord.CodeMergeCount = 0 - } else { - dateRecord.CodeMergeCount = CodeMergeCountMap[dateRecord.ID] - } - - if _, ok := CommitCountMap[dateRecord.ID]; !ok { - dateRecord.CommitCount = 0 - } else { - dateRecord.CommitCount = CommitCountMap[dateRecord.ID] - } - - if _, ok := IssueCountMap[dateRecord.ID]; !ok { - dateRecord.IssueCount = 0 - } else { - dateRecord.IssueCount = IssueCountMap[dateRecord.ID] - } - - if _, ok := CommentCountMap[dateRecord.ID]; !ok { - dateRecord.CommentCount = 0 - } else { - dateRecord.CommentCount = CommentCountMap[dateRecord.ID] - } - - if _, ok := FocusRepoCountMap[dateRecord.ID]; !ok { - dateRecord.FocusRepoCount = 0 - } else { - dateRecord.FocusRepoCount = FocusRepoCountMap[dateRecord.ID] - } - if _, ok := StarRepoCountMap[dateRecord.ID]; !ok { - dateRecord.StarRepoCount = 0 - } else { - dateRecord.StarRepoCount = StarRepoCountMap[dateRecord.ID] - } - - if _, ok := WatchedCountMap[dateRecord.ID]; !ok { - dateRecord.WatchedCount = 0 - } else { - dateRecord.WatchedCount = WatchedCountMap[dateRecord.ID] - } + dateRecord.CodeMergeCount = getMapValue(dateRecord.ID, CodeMergeCountMap) + dateRecord.CommitCount = getMapValue(dateRecord.ID, CommitCountMap) + dateRecord.IssueCount = getMapValue(dateRecord.ID, IssueCountMap) + dateRecord.CommentCount = getMapValue(dateRecord.ID, CommentCountMap) + dateRecord.FocusRepoCount = getMapValue(dateRecord.ID, FocusRepoCountMap) + dateRecord.StarRepoCount = getMapValue(dateRecord.ID, StarRepoCountMap) + dateRecord.WatchedCount = getMapValue(dateRecord.ID, WatchedCountMap) if _, ok := CommitCodeSizeMap[dateRecord.Email]; !ok { dateRecord.CommitCodeSize = 0 @@ -696,35 +668,15 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.CommitCodeSize = int(CommitCodeSizeMap[dateRecord.Email].CommitLines) } - if _, ok := CommitDatasetSizeMap[dateRecord.ID]; !ok { - dateRecord.CommitDatasetSize = 0 - } else { - dateRecord.CommitDatasetSize = CommitDatasetSizeMap[dateRecord.ID] - } + dateRecord.CommitDatasetSize = getMapValue(dateRecord.ID, CommitDatasetSizeMap) + dateRecord.CommitDatasetNum = getMapValue(dateRecord.ID, CommitDatasetNumMap) + dateRecord.SolveIssueCount = getMapValue(dateRecord.ID, SolveIssueCountMap) - if _, ok := SolveIssueCountMap[dateRecord.ID]; !ok { - dateRecord.SolveIssueCount = 0 - } else { - dateRecord.SolveIssueCount = SolveIssueCountMap[dateRecord.ID] - } + dateRecord.EncyclopediasCount = getMapKeyStringValue(dateRecord.Name, wikiCountMap) - if _, ok := wikiCountMap[dateRecord.Name]; !ok { - dateRecord.EncyclopediasCount = 0 - } else { - dateRecord.EncyclopediasCount = wikiCountMap[dateRecord.Name] - } + dateRecord.CreateRepoCount = getMapValue(dateRecord.ID, CreateRepoCountMap) - if _, ok := CreateRepoCountMap[dateRecord.ID]; !ok { - dateRecord.CreateRepoCount = 0 - } else { - dateRecord.CreateRepoCount = CreateRepoCountMap[dateRecord.ID] - } - - if _, ok := LoginCountMap[dateRecord.ID]; !ok { - dateRecord.LoginCount = 0 - } else { - dateRecord.LoginCount = LoginCountMap[dateRecord.ID] - } + dateRecord.LoginCount = 
getMapValue(dateRecord.ID, LoginCountMap) if _, ok := OpenIIndexMap[dateRecord.ID]; !ok { dateRecord.OpenIIndex = 0 @@ -732,8 +684,17 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, dateRecord.OpenIIndex = OpenIIndexMap[dateRecord.ID] } - dateRecord.CommitModelCount = 0 - + dateRecord.CloudBrainTaskNum = getMapValue(dateRecord.ID, CloudBrainTaskMap) + dateRecord.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuDebugJob", CloudBrainTaskItemMap) + dateRecord.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuDebugJob", CloudBrainTaskItemMap) + dateRecord.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuTrainJob", CloudBrainTaskItemMap) + dateRecord.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap) + dateRecord.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap) + dateRecord.CloudBrainRunTime = getMapKeyStringValue(fmt.Sprint(dateRecord.ID)+"_CloudBrainRunTime", CloudBrainTaskItemMap) + dateRecord.CommitModelCount = getMapValue(dateRecord.ID, AiModelManageMap) + dateRecord.UserIndex = getUserIndex(dateRecord, ParaWeight) + setUserMetrics(userMetrics, userRecord, start_unix, end_unix, dateRecord) _, err = statictisSess.Insert(&dateRecord) if err != nil { log.Info("insert daterecord failed." + err.Error()) @@ -747,11 +708,142 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } } - RefreshUserStaticAllTabel(wikiCountMap, CommitCodeSizeMap) + RefreshUserStaticAllTabel(wikiCountMap, userMetrics) + + //insert userMetrics table + var useMetrics UserMetrics + useMetrics.CountDate = CountDate.Unix() + statictisSess.Delete(&useMetrics) + + useMetrics.ActivateRegistUser = getMapKeyStringValue("ActivateRegistUser", userMetrics) + useMetrics.HasActivityUser = getMapKeyStringValue("HasActivityUser", userMetrics) + useMetrics.NotActivateRegistUser = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + useMetrics.TotalActivateRegistUser = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + useMetrics.TotalHasActivityUser = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + statictisSess.Insert(&useMetrics) return nil } +func setUserMetrics(userMetrics map[string]int, user *User, start_time int64, end_time int64, dateRecord UserBusinessAnalysis) { + //ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + //TotalHasActivityUser + regist_time := user.CreatedUnix.AsTime().Unix() + if regist_time >= start_time && regist_time <= end_time { + if user.IsActive { + userMetrics["ActivateRegistUser"] = getMapKeyStringValue("ActivateRegistUser", userMetrics) + 1 + } else { + userMetrics["NotActivateRegistUser"] = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + 1 + } + } + if user.IsActive { + userMetrics["TotalActivateRegistUser"] = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + 1 + } + + if dateRecord.UserIndex > 0 || dateRecord.LoginCount > 0 { + userMetrics["HasActivityUser"] = getMapKeyStringValue("HasActivityUser", userMetrics) + 1 + } + +} + +func getParaWeight() map[string]float64 { + result := make(map[string]float64) + statictisSess := 
xStatistic.NewSession() + defer statictisSess.Close() + statictisSess.Select("*").Table(new(UserAnalysisPara)) + paraList := make([]*UserAnalysisPara, 0) + statictisSess.Find(&paraList) + for _, paraRecord := range paraList { + result[paraRecord.Key] = paraRecord.Value + } + return result +} + +func getUserIndexFromAnalysisAll(dateRecord UserBusinessAnalysisAll, ParaWeight map[string]float64) float64 { + var result float64 + // PR count 0.20 + // commit count 0.20 + // issues created 0.20 + // comment count 0.20 + // watched repos 0.10 + // starred repos 0.10 + // login count 0.10 + result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2) + result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2) + log.Info("1 result=" + fmt.Sprint(result)) + result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2) + result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2) + result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1) + result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3) + result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1) + result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2) + result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1) + result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05) + result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3) + result += float64(dateRecord.CommitModelCount) * getParaWeightValue("CommitModelCount", ParaWeight, 0.2) + result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1) + + return result +} + +func getUserIndex(dateRecord UserBusinessAnalysis, ParaWeight map[string]float64) float64 { + var result float64 + // PR count 0.20 + // commit count 0.20 + // issues created 0.20 + // comment count 0.20 + // watched repos 0.10 + // starred repos 0.10 + // login count 0.10 + result = float64(dateRecord.CodeMergeCount) * getParaWeightValue("CodeMergeCount", ParaWeight, 0.2) + result += float64(dateRecord.CommitCount) * getParaWeightValue("CommitCount", ParaWeight, 0.2) + log.Info("2 result=" + fmt.Sprint(result)) + result += float64(dateRecord.IssueCount) * getParaWeightValue("IssueCount", ParaWeight, 0.2) + result += float64(dateRecord.CommentCount) * getParaWeightValue("CommentCount", ParaWeight, 0.2) + result += float64(dateRecord.FocusRepoCount) * getParaWeightValue("FocusRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1) + result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1) + result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3) + result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1) + result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2) + result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1) + result += float64(dateRecord.CreateRepoCount) * 
getParaWeightValue("CreateRepoCount", ParaWeight, 0.05) + result += float64(dateRecord.CloudBrainTaskNum) * getParaWeightValue("CloudBrainTaskNum", ParaWeight, 0.3) + result += float64(dateRecord.CommitModelCount) * getParaWeightValue("CommitModelCount", ParaWeight, 0.2) + result += dateRecord.OpenIIndex * getParaWeightValue("OpenIIndex", ParaWeight, 0.1) + + return result +} + +func getParaWeightValue(key string, valueMap map[string]float64, defaultValue float64) float64 { + if _, ok := valueMap[key]; !ok { + return defaultValue + } else { + return valueMap[key] + } +} + +func getMapKeyStringValue(key string, valueMap map[string]int) int { + if _, ok := valueMap[key]; !ok { + return 0 + } else { + return valueMap[key] + } +} + +func getMapValue(userId int64, valueMap map[int64]int) int { + if _, ok := valueMap[userId]; !ok { + return 0 + } else { + return valueMap[userId] + } +} + func getInt(str string) int { re, err := strconv.ParseInt(str, 10, 32) if err != nil { @@ -1052,16 +1144,17 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { return resultMap } -func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { +func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() - resultMap := make(map[int64]int) + resultSizeMap := make(map[int64]int) + resultNumMap := make(map[int64]int) cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Attachment)) if err != nil { log.Info("query attachment error. return.") - return resultMap + return resultSizeMap, resultNumMap } var indexTotal int64 indexTotal = 0 @@ -1072,10 +1165,12 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { log.Info("query Attachment size=" + fmt.Sprint(len(attachmentList))) for _, attachRecord := range attachmentList { - if _, ok := resultMap[attachRecord.UploaderID]; !ok { - resultMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + if _, ok := resultSizeMap[attachRecord.UploaderID]; !ok { + resultSizeMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] = 1 } else { - resultMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultSizeMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB + resultNumMap[attachRecord.UploaderID] += 1 } } @@ -1085,7 +1180,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { } } - return resultMap + return resultSizeMap, resultNumMap } func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { @@ -1212,6 +1307,133 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { return resultMap } +func queryCommitCodeSize(start_unix int64, end_unix int64) map[int64]int { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + + resultMap := make(map[int64]int) + cond := "count_date>=" + fmt.Sprint(start_unix) + " and count_date<=" + fmt.Sprint(end_unix) + count, err := statictisSess.Where(cond).Count(new(UserBusinessAnalysis)) + if err != nil { + log.Info("query commit code size error. 
return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + statictisSess.Select("id,commit_code_size").Table("user_business_analysis").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + userBusinessAnalysisList := make([]*UserBusinessAnalysis, 0) + statictisSess.Find(&userBusinessAnalysisList) + log.Info("query user login size=" + fmt.Sprint(len(userBusinessAnalysisList))) + for _, analysisRecord := range userBusinessAnalysisList { + if _, ok := resultMap[analysisRecord.ID]; !ok { + resultMap[analysisRecord.ID] = analysisRecord.CommitCodeSize + } else { + resultMap[analysisRecord.ID] += analysisRecord.CommitCodeSize + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + log.Info("user commit code size=" + fmt.Sprint(len(resultMap))) + return resultMap +} + +func queryUserModel(start_unix int64, end_unix int64) map[int64]int { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(AiModelManage)) + if err != nil { + log.Info("query AiModelManage error. return.") + return resultMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,user_id").Table("ai_model_manage").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + aiModelList := make([]*AiModelManage, 0) + sess.Find(&aiModelList) + log.Info("query AiModelManage size=" + fmt.Sprint(len(aiModelList))) + for _, aiModelRecord := range aiModelList { + if _, ok := resultMap[aiModelRecord.UserId]; !ok { + resultMap[aiModelRecord.UserId] = 1 + } else { + resultMap[aiModelRecord.UserId] += 1 + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + return resultMap +} + +func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[string]int) { + sess := x.NewSession() + defer sess.Close() + resultMap := make(map[int64]int) + resultItemMap := make(map[string]int) + + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + count, err := sess.Where(cond).Count(new(Cloudbrain)) + if err != nil { + log.Info("query cloudbrain error. 
return.") + return resultMap, resultItemMap + } + var indexTotal int64 + indexTotal = 0 + for { + sess.Select("id,job_type,user_id,duration,train_job_duration,type").Table("cloudbrain").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + cloudTaskList := make([]*Cloudbrain, 0) + sess.Find(&cloudTaskList) + log.Info("query cloudbrain size=" + fmt.Sprint(len(cloudTaskList))) + for _, cloudTaskRecord := range cloudTaskList { + if _, ok := resultMap[cloudTaskRecord.UserID]; !ok { + resultMap[cloudTaskRecord.UserID] = 1 + } else { + resultMap[cloudTaskRecord.UserID] += 1 + } + setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap) + if cloudTaskRecord.Type == 1 { //npu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "INFERENCE" { + setMapKey("NpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } else { //type=0 gpu + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "BENCHMARK" { + setMapKey("GpuBenchMarkJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + + return resultMap, resultItemMap +} +func setMapKey(key string, userId int64, value int, resultItemMap map[string]int) { + newKey := fmt.Sprint(userId) + "_" + key + if _, ok := resultItemMap[newKey]; !ok { + resultItemMap[newKey] = value + } else { + resultItemMap[newKey] += value + } +} + func subMonth(t1, t2 time.Time) (month int) { y1 := t1.Year() y2 := t2.Year() diff --git a/models/user_business_struct.go b/models/user_business_struct.go index c435c0b07..17d9f046f 100644 --- a/models/user_business_struct.go +++ b/models/user_business_struct.go @@ -44,6 +44,18 @@ type UserBusinessAnalysisCurrentYear struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLast30Day struct { @@ -88,6 +100,18 @@ type UserBusinessAnalysisLast30Day struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisLastMonth struct { @@ -132,6 +156,18 @@ type UserBusinessAnalysisLastMonth struct { //user Name string `xorm:"NOT NULL"` DataDate 
string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentMonth struct { @@ -176,6 +212,18 @@ type UserBusinessAnalysisCurrentMonth struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisCurrentWeek struct { @@ -220,6 +268,18 @@ type UserBusinessAnalysisCurrentWeek struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` } type UserBusinessAnalysisYesterday struct { @@ -264,4 +324,30 @@ type UserBusinessAnalysisYesterday struct { //user Name string `xorm:"NOT NULL"` DataDate string `xorm:"NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserLocation string `xorm:"NULL"` +} + +type UserAnalysisPara struct { + Key string `xorm:"NOT NULL"` + Value float64 `xorm:"NOT NULL DEFAULT 0"` +} + +type UserMetrics struct { + CountDate int64 `xorm:"pk"` + ActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + NotActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + HasActivityUser int `xorm:"NOT NULL DEFAULT 0"` + TotalActivateRegistUser int `xorm:"NOT NULL DEFAULT 0"` + TotalHasActivityUser int `xorm:"NOT NULL DEFAULT 0"` } diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go index 9949feddc..9d3d6290f 100755 --- a/modules/auth/cloudbrain.go +++ b/modules/auth/cloudbrain.go @@ -20,6 +20,9 @@ type CreateCloudBrainForm struct { ResourceSpecId int `form:"resource_spec_id" binding:"Required"` BenchmarkTypeID int `form:"benchmark_types_id"` BenchmarkChildTypeID int `form:"benchmark_child_types_id"` + BootFile string `form:"boot_file"` + Params 
string `form:"run_para_list"` + BranchName string `form:"branch_name"` } type CommitImageCloudBrainForm struct { diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go index 54ac0c7ac..4a89f9393 100755 --- a/modules/cloudbrain/cloudbrain.go +++ b/modules/cloudbrain/cloudbrain.go @@ -15,14 +15,13 @@ import ( ) const ( - Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple; - service ssh stop; - jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` + Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` //CommandBenchmark = `echo "start benchmark";python /code/test.py;echo "end benchmark"` CommandBenchmark = `echo "start benchmark";cd /benchmark && bash run_bk.sh;echo "end benchmark"` CodeMountPath = "/code" DataSetMountPath = "/dataset" ModelMountPath = "/model" + LogFile = "log.txt" BenchMarkMountPath = "/benchmark" BenchMarkResourceID = 1 Snn4imagenetMountPath = "/snn4imagenet" @@ -32,10 +31,13 @@ const ( SubTaskName = "task1" Success = "S000" + + DefaultBranchName = "master" ) var ( - ResourceSpecs *models.ResourceSpecs + ResourceSpecs *models.ResourceSpecs + TrainResourceSpecs *models.ResourceSpecs ) func isAdminOrOwnerOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error) bool { @@ -147,7 +149,7 @@ func AdminOrJobCreaterRightForTrain(ctx *context.Context) { } -func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, uuid, codePath, modelPath, benchmarkPath, snn4imagenetPath, brainScorePath, jobType, gpuQueue, description string, benchmarkTypeID, benchmarkChildTypeID, resourceSpecId int) error { +func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, uuid, codePath, modelPath, benchmarkPath, snn4imagenetPath, brainScorePath, jobType, gpuQueue, description, branchName, bootFile, params string, benchmarkTypeID, benchmarkChildTypeID, resourceSpecId int) error { dataActualPath := setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.Attachment.Minio.BasePath + @@ -155,13 +157,27 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, uuid var resourceSpec *models.ResourceSpec - if ResourceSpecs == nil { - json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) - } - for _, spec := range ResourceSpecs.ResourceSpec { - if resourceSpecId == spec.Id { - resourceSpec = spec + var versionCount int + if jobType == string(models.JobTypeTrain) { + versionCount = 1 + if TrainResourceSpecs == nil { + json.Unmarshal([]byte(setting.TrainResourceSpecs), &TrainResourceSpecs) + } + for _, spec := range TrainResourceSpecs.ResourceSpec { + if resourceSpecId == spec.Id { + resourceSpec = spec + } + } + } else { + if ResourceSpecs == nil { + json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs) + } + for _, spec := range ResourceSpecs.ResourceSpec { + if resourceSpecId == spec.Id { + resourceSpec = spec + } } + } if resourceSpec == nil { @@ -169,6 +185,15 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, return errors.New("no such resourceSpec") } + var datasetName string + attach, err := models.GetAttachmentByUUID(uuid) + if err != nil { + //for benchmark, do not return 
error + log.Error("GetAttachmentByUUID failed:%v", err) + } else { + datasetName = attach.Name + } + jobResult, err := CreateJob(jobName, models.CreateJobParams{ JobName: jobName, RetryCount: 1, @@ -263,13 +288,19 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, BenchmarkTypeID: benchmarkTypeID, BenchmarkChildTypeID: benchmarkChildTypeID, Description: description, + IsLatestVersion: "1", + VersionCount: versionCount, + BranchName: branchName, + BootFile: bootFile, + DatasetName: datasetName, + Parameters: params, }) if err != nil { return err } - task, err := models.GetCloudbrainByName(jobName) + task, err := models.GetCloudbrainByJobID(jobID) if err != nil { log.Error("GetCloudbrainByName failed: %v", err.Error()) return err @@ -278,6 +309,8 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, if string(models.JobTypeBenchmark) == jobType { notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, stringId, displayJobName, models.ActionCreateBenchMarkTask) + } else if string(models.JobTypeTrain) == jobType { + notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, jobID, displayJobName, models.ActionCreateGPUTrainTask) } else { notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, stringId, displayJobName, models.ActionCreateDebugGPUTask) } @@ -407,8 +440,10 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e return err } - idString := strconv.FormatInt(newTask.ID, 10) - *newID = idString + stringId := strconv.FormatInt(newTask.ID, 10) + *newID = stringId + + notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, stringId, task.DisplayJobName, models.ActionCreateDebugGPUTask) return nil } diff --git a/modules/cron/tasks_basic.go b/modules/cron/tasks_basic.go index b9838e66f..b3a6c02a1 100755 --- a/modules/cron/tasks_basic.go +++ b/modules/cron/tasks_basic.go @@ -185,6 +185,17 @@ func registerHandleSummaryStatistic() { }) } +func registerHandleOrgStatistic() { + RegisterTaskFatal("handle_org_statistic", &BaseConfig{ + Enabled: true, + RunAtStart: false, + Schedule: "0 0 2 * * ?", + }, func(ctx context.Context, _ *models.User, _ Config) error { + models.UpdateOrgStatistics() + return nil + }) +} + func registerSyncCloudbrainStatus() { RegisterTaskFatal("sync_cloudbrain_status", &BaseConfig{ Enabled: true, @@ -215,4 +226,5 @@ func initBasicTasks() { registerHandleSummaryStatistic() registerSyncCloudbrainStatus() + registerHandleOrgStatistic() } diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go index e30d0100c..b1e7b269e 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -56,7 +56,6 @@ const ( PerPage = 10 IsLatestVersion = "1" NotLatestVersion = "0" - DebugType = -1 VersionCount = 1 SortByCreateTime = "create_time" @@ -281,6 +280,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc RepoID: ctx.Repo.Repository.ID, JobID: jobResult.ID, JobName: jobName, + FlavorCode: flavor, DisplayJobName: displayJobName, JobType: string(models.JobTypeDebug), Type: models.TypeCloudBrainTwo, diff --git a/modules/setting/setting.go b/modules/setting/setting.go index 946fb73b4..26f068193 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -437,7 +437,7 @@ var ( //home page RecommentRepoAddr string - + ESSearchURL string //notice config UserNameOfNoticeRepo string RepoNameOfNoticeRepo string @@ -452,16 +452,18 @@ var ( DecompressOBSTaskName string //cloudbrain config - CBAuthUser 
string - CBAuthPassword string - RestServerHost string - JobPath string - CBCodePathPrefix string - JobType string - GpuTypes string - DebugServerHost string - ResourceSpecs string - MaxDuration int64 + CBAuthUser string + CBAuthPassword string + RestServerHost string + JobPath string + CBCodePathPrefix string + JobType string + GpuTypes string + DebugServerHost string + ResourceSpecs string + MaxDuration int64 + TrainGpuTypes string + TrainResourceSpecs string //benchmark config IsBenchmarkEnabled bool @@ -1265,6 +1267,7 @@ func NewContext() { sec = Cfg.Section("homepage") RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/") + ESSearchURL = sec.Key("ESSearchURL").MustString("http://192.168.207.94:9200") sec = Cfg.Section("notice") UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG") @@ -1285,6 +1288,8 @@ func NewContext() { GpuTypes = sec.Key("GPU_TYPES").MustString("") ResourceSpecs = sec.Key("RESOURCE_SPECS").MustString("") MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400) + TrainGpuTypes = sec.Key("TRAIN_GPU_TYPES").MustString("") + TrainResourceSpecs = sec.Key("TRAIN_RESOURCE_SPECS").MustString("") sec = Cfg.Section("benchmark") IsBenchmarkEnabled = sec.Key("ENABLED").MustBool(false) diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 976387dd9..5d531d6e4 100755 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -254,6 +254,18 @@ page_dev_yunlao_desc3=Developers can freely choose the corresponding computing r page_dev_yunlao_desc4=If your model requires more computing resources, you can also apply for it separately. page_dev_yunlao_apply=Apply Separately +search=Search +search_repo=Repository +search_dataset=DataSet +search_issue=Issue +search_pr=Pull Request +search_user=User +search_org=Organization +search_finded=Find +search_related=related +search_maybe=maybe +search_ge= + [explore] repos = Repositories select_repos = Select the project @@ -481,6 +493,11 @@ static.encyclopediascount=Encyclopedias Count static.createrepocount=Create Repo Count static.openiindex=OpenI Index static.registdate=Regist Date +static.CloudBrainTaskNum=CloudBrain Task Count +static.CloudBrainRunTime=CloudBrain Run Time +static.CommitDatasetNum=Commit Dataset Count +static.CommitModelCount=Commit Model Count +static.UserIndex=User Index static.countdate=Count Date static.all=All static.public.user_business_analysis_current_month=Current_Month @@ -899,7 +916,9 @@ language_other = Other datasets = Datasets datasets.desc = Enable Dataset cloudbrain_helper=Use GPU/NPU resources to open notebooks, model training tasks, etc. - +cloudbrain.exitinfo=Exit Information +cloudbrain.platform=Platform +cloudbrain.endtime=End Time model_manager = Model model_noright=No right model_rename=Duplicate model name, please modify model name. 
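A note on the user-index computation introduced in models/user_business_analysis.go above: each activity counter is multiplied by a weight, and rows in the new UserAnalysisPara table override the hardcoded fallbacks passed at every getParaWeightValue call site. A minimal, runnable sketch of that lookup-with-default pattern follows; the function name and key names are taken from the diff, while the sample override and counts are invented for illustration:

    package main

    import "fmt"

    // getParaWeightValue mirrors the helper in the diff: use the configured
    // weight when a UserAnalysisPara row provides one, else the default.
    func getParaWeightValue(key string, valueMap map[string]float64, defaultValue float64) float64 {
        if v, ok := valueMap[key]; ok {
            return v
        }
        return defaultValue
    }

    func main() {
        // Hypothetical override loaded by getParaWeight(): CommitCount -> 0.5.
        paraWeight := map[string]float64{"CommitCount": 0.5}

        index := float64(4) * getParaWeightValue("CodeMergeCount", paraWeight, 0.2) // falls back to 0.2
        index += float64(10) * getParaWeightValue("CommitCount", paraWeight, 0.2)   // overridden to 0.5
        fmt.Printf("user index = %.2f\n", index)                                    // 4*0.2 + 10*0.5 = 5.80
    }

Note that the defaults are embedded at each call site (e.g. WatchedCount 0.3, CreateRepoCount 0.05) and apply only when no UserAnalysisPara row exists for that key.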
@@ -1010,7 +1029,8 @@ modelarts.train_job.parameter_value=Parameter Value modelarts.train_job.resource_setting=resource_setting modelarts.train_job.resource_setting_info=resource_setting_info modelarts.train_job.resource_pool=resource_pool -modelarts.train_job.resource_type=resource_type +modelarts.train_job.resource_type=Resource Type +modelarts.train_job.train_dataset=Train Dataset modelarts.train_job.standard=Standard modelarts.train_job.NAS_address=NAS Address modelarts.train_job.NAS_mount_path=NAS Mount Path @@ -2182,6 +2202,16 @@ customize = Customize selected_project=Selected Projects fold = Fold unfold = Unfold +org_member = Member +org_members = Members +org_team = Team +org_teams = Teams +org_repository = Repository +org_repositories = Repositories + +star = Star Top10 +member = Members Top10 +active = Active Top10 form.name_reserved = The organization name '%s' is reserved. form.name_pattern_not_allowed = The pattern '%s' is not allowed in an organization name. @@ -2794,10 +2824,11 @@ reject_pull_request = `suggested changes for %s#%[2]s` upload_dataset=`upload dataset %s` task_gpudebugjob=`created CPU/GPU type debugging task%s` task_npudebugjob=`created NPU type debugging task %s` -task_trainjob=`created training task%s` +task_nputrainjob=`created NPU training task%s` task_inferencejob=`created reasoning task %s` task_benchmark=`created profiling task %s` task_createmodel=`created new model %s` +task_gputrainjob=`created CPU/GPU training task%s` [tool] ago = %s ago diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 6e4b2dc59..3c0a98197 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -256,6 +256,18 @@ page_dev_yunlao_desc3=开发者可以根据使用需求,自由选择相应计 page_dev_yunlao_desc4=如果您的模型需要更多的计算资源,也可以单独申请 page_dev_yunlao_apply=单独申请 +search=搜索 +search_repo=项目 +search_dataset=数据集 +search_issue=任务 +search_pr=合并请求 +search_user=用户 +search_org=组织 +search_finded=找到 +search_related=相关 +search_maybe=约为 +search_ge=个 + [explore] repos=项目 select_repos=精选项目 @@ -484,6 +496,11 @@ static.encyclopediascount=百科页面贡献次数 static.createrepocount=创建项目数 static.openiindex=OpenI指数 static.registdate=用户注册时间 +static.CloudBrainTaskNum=云脑任务数 +static.CloudBrainRunTime=云脑运行时间(小时) +static.CommitDatasetNum=上传(提交)数据集文件数 +static.CommitModelCount=提交模型数 +static.UserIndex=用户指数 static.countdate=系统统计时间 static.all=所有 static.public.user_business_analysis_current_month=本月 @@ -946,7 +963,9 @@ cloudbrain_jobname_err=只能以小写字母或数字开头且只包含小写字 cloudbrain_query_fail=查询云脑任务失败。 cloudbrain.mirror_tag = 镜像标签 cloudbrain.mirror_description = 镜像描述 - +cloudbrain.exitinfo=退出信息 +cloudbrain.platform=平台 +cloudbrain.endtime=结束时间 record_begintime_get_err=无法获取统计开始时间。 parameter_is_wrong=输入参数错误,请检查输入参数。 total_count_get_error=查询总页数失败。 @@ -2187,6 +2206,16 @@ customize = 自定义 selected_project=精选项目 fold = 收起 unfold = 展开 +org_member = 成员 +org_members = 成员 +org_team = 团队 +org_teams = 团队 +org_repository = 项目 +org_repositories = 项目 + +star = 点赞榜 +member = 成员榜 +active = 活跃榜 form.name_reserved=组织名称 '%s' 是被保留的。 form.name_pattern_not_allowed=组织名称中不允许使用 "%s"。 @@ -2800,10 +2829,11 @@ reject_pull_request=`建议变更 %s#%[2]s` upload_dataset=`上传了数据集文件 %s` task_gpudebugjob=`创建了CPU/GPU类型调试任务 %s` task_npudebugjob=`创建了NPU类型调试任务 %s` -task_trainjob=`创建了训练任务 %s` +task_nputrainjob=`创建了NPU类型训练任务 %s` task_inferencejob=`创建了推理任务 %s` task_benchmark=`创建了评测任务 %s` task_createmodel=`导入了新模型 %s` +task_gputrainjob=`创建了CPU/GPU类型训练任务 %s` [tool] ago=%s前 diff --git a/public/home/home.js b/public/home/home.js old mode 100644 new mode 100755 index 
7512a4423..478c70f21 --- a/public/home/home.js +++ b/public/home/home.js @@ -135,7 +135,7 @@ socket.onmessage = function (e) { html += recordPrefix + actionName; html += " " + getRepotext(record) + "" } - else if(record.OpType == "24" || record.OpType == "26" || record.OpType == "27" || record.OpType == "28" || record.OpType == "30"){ + else if(record.OpType == "24" || record.OpType == "26" || record.OpType == "27" || record.OpType == "28" || record.OpType == "30" || record.OpType == "31"){ html += recordPrefix + actionName; html += " " + record.RefName + "" } @@ -175,6 +175,8 @@ function getTaskLink(record){ re = re + "/cloudbrain/benchmark/" + record.Content; }else if(record.OpType == 30){ re = re + "/modelmanage/show_model_info?name=" + record.RefName; + }else if(record.OpType == 31){ + re = re + "/cloudbrain/train-job/" + record.Content; } re = encodeURI(re); return re; @@ -321,10 +323,11 @@ var actionNameZH={ "24":"上传了数据集文件", "25":"创建了CPU/GPU类型调试任务", "26":"创建了NPU类型调试任务", - "27":"创建了训练任务", + "27":"创建了NPU类型训练任务", "28":"创建了推理任务", "29":"创建了评测任务", - "30":"导入了新模型" + "30":"导入了新模型", + "31":"创建了CPU/GPU类型训练任务" }; var actionNameEN={ @@ -346,10 +349,11 @@ var actionNameEN={ "24":" upload dataset ", "25":" created CPU/GPU type debugging task ", "26":" created NPU type debugging task ", - "27":" created training task", + "27":" created NPU type training task", "28":" created reasoning task", "29":" created profiling task", - "30":" created new model" + "30":" created new model", + "31":" created CPU/GPU type training task", }; var repoAndOrgZH={ diff --git a/public/home/search.js b/public/home/search.js new file mode 100644 index 000000000..e23d27549 --- /dev/null +++ b/public/home/search.js @@ -0,0 +1,1305 @@ +var token; +if(isEmpty(token)){ + var meta = $("meta[name=_uid]"); + if(!isEmpty(meta)){ + token = meta.attr("content"); + console.log("token is uid:" + token); + } +} + +var html =document.documentElement; +var lang = html.attributes["lang"] +var isZh = true; +if(lang != null && lang.nodeValue =="en-US" ){ + console.log("the language is " + lang.nodeValue); + isZh=false; +}else{ + console.log("default lang=zh"); +} +function isEmpty(str){ + if(typeof str == "undefined" || str == null || str == ""){ + return true; + } + return false; +} + +var itemType={ + "1":"repository", + "2":"issue", + "3":"user", + "4":"org", + "5":"dataset", + "6":"pr" +}; + +var sortBy={ + "10":"default", + "11":"updated_unix.keyword", + "12":"num_watches", + "13":"num_stars", + "14":"num_forks", + "20":"default", + "21":"updated_unix.keyword", + "30":"default", + "31":"name.keyword", + "32":"name.keyword", + "33":"created_unix.keyword", + "34":"created_unix.keyword", + "40":"default", + "41":"name.keyword", + "42":"name.keyword", + "43":"created_unix.keyword", + "44":"created_unix.keyword", + "50":"default", + "51":"download_times", + "60":"default", + "61":"updated_unix.keyword" +}; + +var sortAscending={ + "10":"false", + "11":"false", + "12":"false", + "13":"false", + "14":"false", + "20":"false", + "21":"false", + "30":"false", + "31":"true", + "32":"false", + "33":"false", + "34":"true", + "40":"false", + "41":"true", + "42":"false", + "43":"false", + "44":"true", + "50":"false", + "51":"false", + "60":"false", + "61":"false" +}; + +var currentPage = 1; +var pageSize = 15; +var currentSearchTableName ="repository"; +var currentSearchKeyword=""; +var currentSearchSortBy=""; +var currentSearchAscending="false"; +var OnlySearchLabel=false; +var startIndex =1; +var endIndex = 5; +var totalPage = 1; +var totalNum = 
0; +var privateTotal = 0; + +function initPageInfo(){ + currentPage = 1; + startIndex =1; + endIndex = 5; +} + +function searchItem(type,sortType){ + console.log("enter item 2."); + currentSearchKeyword = document.getElementById("keyword_input").value; + if(!isEmpty(currentSearchKeyword)){ + initPageInfo(); + currentSearchTableName = itemType[type]; + currentSearchSortBy = sortBy[sortType]; + currentSearchAscending = sortAscending[sortType]; + OnlySearchLabel =false; + page(currentPage); + }else{ + emptySearch(); + } +} + + + +function search(){ + console.log("enter here 1."); + currentSearchKeyword = document.getElementById("keyword_input").value; + if(!isEmpty(currentSearchKeyword)){ + currentSearchKeyword = currentSearchKeyword.trim(); + } + if(!isEmpty(currentSearchKeyword)){ + doSpcifySearch(currentSearchTableName,currentSearchKeyword,sortBy[10],"false"); + }else{ + emptySearch(); + } +} + +function emptySearch(){ + initDiv(false); + initPageInfo(); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_empty"); + $('#find_title').html(""); + document.getElementById("sort_type").innerHTML=""; + document.getElementById("child_search_item").innerHTML=""; + document.getElementById("page_menu").innerHTML=""; + $('#repo_total').text(""); + $('#pr_total').text(""); + $('#issue_total').text(""); + $('#dataset_total').text(""); + $('#user_total').text(""); + $('#org_total').text(""); + setActivate(null); +} + +function initDiv(isSearchLabel=false){ + if(isSearchLabel){ + document.getElementById("search_div").style.display="none"; + document.getElementById("search_label_div").style.display="block"; + document.getElementById("dataset_item").style.display="none"; + document.getElementById("issue_item").style.display="none"; + document.getElementById("pr_item").style.display="none"; + document.getElementById("user_item").style.display="none"; + document.getElementById("org_item").style.display="none"; + document.getElementById("find_id").innerHTML=""; + }else{ + document.getElementById("search_div").style.display="block"; + document.getElementById("search_label_div").style.display="none"; + document.getElementById("dataset_item").style.display="block"; + document.getElementById("issue_item").style.display="block"; + document.getElementById("pr_item").style.display="block"; + document.getElementById("user_item").style.display="block"; + document.getElementById("org_item").style.display="block"; + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); + } +} + +function doSpcifySearch(tableName,keyword,sortBy="",ascending="false"){ + initDiv(false); + $('#searchForm').addClass("hiddenSearch"); + document.getElementById("find_id").innerHTML=getLabel(isZh,"search_finded"); + currentSearchKeyword = keyword; + initPageInfo(); + currentSearchTableName = tableName; + currentSearchSortBy = sortBy; + currentSearchAscending = ascending; + OnlySearchLabel =false; + + page(currentPage); + + if(currentSearchTableName != "repository"){ + doSearch("repository",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "issue"){ + doSearch("issue",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "user"){ + doSearch("user",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "org"){ + doSearch("org",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "dataset"){ + 
doSearch("dataset",currentSearchKeyword,1,pageSize,true,"",false); + } + if(currentSearchTableName != "pr"){ + doSearch("pr",currentSearchKeyword,1,pageSize,true,"",false); + } +} + +function doSearchLabel(tableName,keyword,sortBy="",ascending="false"){ + initDiv(true); + //document.getElementById("search_div").style.display="none"; + //document.getElementById("search_label_div").style.display="block"; + document.getElementById("search_label_div").innerHTML="
#" + keyword + "
"; + + currentSearchKeyword = keyword; + initPageInfo(); + currentSearchTableName = tableName; + currentSearchSortBy = sortBy; + currentSearchAscending = ascending; + OnlySearchLabel =true; + + page(currentPage); +} + +function searchLabel(tableName,keyword,sortBy="",ascending="false"){ + + sessionStorage.setItem("keyword",keyword); + sessionStorage.setItem("tableName",tableName); + sessionStorage.setItem("searchLabel",true); + sessionStorage.setItem("sortBy",sortBy); + sessionStorage.setItem("ascending",ascending); + console.log("enter label search."); + window.open("/all/search/"); +} + +function doSearch(tableName,keyword,page,pageSize=15,onlyReturnNum=true,sortBy="",OnlySearchLabel=false){ + var language = "zh-CN"; + if(!isZh){ + language="en-US"; + } + $.ajax({ + type:"GET", + url:"/all/dosearch/", + headers: { + authorization:token, + }, + dataType:"json", + data:{ + 'TableName': tableName, + 'Key': keyword, + 'Page': page, + 'PageSize': pageSize, + 'OnlyReturnNum':onlyReturnNum, + 'SortBy':sortBy, + 'OnlySearchLabel':OnlySearchLabel, + 'Ascending':currentSearchAscending, + 'WebTotal':totalNum, + 'PrivateTotal':privateTotal, + 'language':language + }, + async:true, + success:function(json){ + console.log("tableName=" + tableName); + console.log(json); + displayResult(tableName,page,json,onlyReturnNum,keyword); + }, + error:function(response) { + console.log(response); + } + }); +} + +function displayResult(tableName,page,jsonResult,onlyReturnNum,keyword){ + if(tableName == "repository") { + displayRepoResult(page,jsonResult,onlyReturnNum,keyword); + } else if (tableName == "issue") { + displayIssueResult(page,jsonResult,onlyReturnNum,keyword); + } else if (tableName == "user") { + displayUserResult(page,jsonResult,onlyReturnNum,keyword); + } else if (tableName == "org") { + displayOrgResult(page,jsonResult,onlyReturnNum,keyword); + } else if (tableName == "dataset") { + displayDataSetResult(page,jsonResult,onlyReturnNum,keyword); + } else if (tableName == "pr") { + displayPrResult(page,jsonResult,onlyReturnNum,keyword); + } + if(!onlyReturnNum){ + console.log("set total num." 
+
+// Dispatch a search response to the renderer for its table; for a full query
+// (onlyReturnNum=false) also refresh the pager state from the totals.
+function displayResult(tableName,page,jsonResult,onlyReturnNum,keyword){
+    if(tableName == "repository") {
+        displayRepoResult(page,jsonResult,onlyReturnNum,keyword);
+    } else if (tableName == "issue") {
+        displayIssueResult(page,jsonResult,onlyReturnNum,keyword);
+    } else if (tableName == "user") {
+        displayUserResult(page,jsonResult,onlyReturnNum,keyword);
+    } else if (tableName == "org") {
+        displayOrgResult(page,jsonResult,onlyReturnNum,keyword);
+    } else if (tableName == "dataset") {
+        displayDataSetResult(page,jsonResult,onlyReturnNum,keyword);
+    } else if (tableName == "pr") {
+        displayPrResult(page,jsonResult,onlyReturnNum,keyword);
+    }
+    if(!onlyReturnNum){
+        console.log("set total num. " + tableName);
+        totalPage = Math.ceil(jsonResult.Total/pageSize);
+        totalNum = jsonResult.Total;
+        privateTotal = jsonResult.PrivateTotal;
+        setPage(page);
+    }
+}
+
+function displayPrResult(page,jsonResult,onlyReturnNum,keyword){
+    var data = jsonResult.Result;
+    var total = jsonResult.Total;
+    $('#pr_total').text(total);
+    if(!onlyReturnNum){
+        setActivate("pr_item");
+        //$('#keyword_desc').text(keyword);
+        //$('#obj_desc').text(getLabel(isZh,"search_pr"));
+        //$('#child_total').text(total);
+        $('#find_title').html(getLabel(isZh,"find_title").replace('{keyword}',keyword).replace('{tablename}',getLabel(isZh,"search_pr")).replace('{total}',total));
+
+        setIssueOrPrInnerHtml(data,"pulls");
+    }
+}
+
+// zh-CN and en-US display names for dataset category keys.
+var categoryDesc={
+    "computer_vision":"计算机视觉",
+    "natural_language_processing":"自然语言处理",
+    "speech_processing":"语音处理",
+    "computer_vision_natural_language_processing":"计算机视觉、自然语言处理"
+};
+
+var categoryENDesc={
+    "computer_vision":"computer vision",
+    "natural_language_processing":"natural language processing",
+    "speech_processing":"speech processing",
+    "computer_vision_natural_language_processing":"computer vision and natural language processing"
+};
+
+// zh-CN and en-US display names for dataset task keys.
+var taskDesc={
+    "machine_translation":"机器翻译",
+    "question_answering_system":"问答系统",
+    "information_retrieval":"信息检索",
+    "knowledge_graph":"知识图谱",
+    "text_annotation":"文本标注",
+    "text_categorization":"文本分类",
+    "emotion_analysis":"情感分析",
+    "language_modeling":"语言建模",
+    "speech_recognition":"语音识别",
+    "automatic_digest":"自动文摘",
+    "information_extraction":"信息抽取",
+    "description_generation":"说明生成",
+    "image_classification":"图像分类",
+    "face_recognition":"人脸识别",
+    "image_search":"图像搜索",
+    "target_detection":"目标检测",
+    "image_description_generation":"图像描述生成",
+    "vehicle_license_plate_recognition":"车辆车牌识别",
+    "medical_image_analysis":"医学图像分析",
+    "unmanned":"无人驾驶",
+    "unmanned_security":"无人安防",
+    "drone":"无人机",
+    "vr_ar":"VR/AR",
+    "2_d_vision":"2-D视觉",
+    "2_5_d_vision":"2.5-D视觉",
+    "3_d_reconstruction":"3D重构",
+    "image_processing":"图像处理",
+    "video_processing":"视频处理",
+    "visual_input_system":"视觉输入系统",
+    "speech_coding":"语音编码",
+    "speech_enhancement":"语音增强",
+    "speech_synthesis":"语音合成"
+};
+
+var taskENDesc={
+    "machine_translation":"machine translation",
+    "question_answering_system":"question answering system",
+    "information_retrieval":"information retrieval",
+    "knowledge_graph":"knowledge graph",
+    "text_annotation":"text annotation",
+    "text_categorization":"text categorization",
+    "emotion_analysis":"emotion analysis",
+    "language_modeling":"language modeling",
+    "speech_recognition":"speech recognition",
+    "automatic_digest":"automatic digest",
+    "information_extraction":"information extraction",
+    "description_generation":"description generation",
+    "image_classification":"image classification",
+    "face_recognition":"face recognition",
+    "image_search":"image search",
+    "target_detection":"target detection",
+    "image_description_generation":"image description generation",
+    "vehicle_license_plate_recognition":"vehicle license plate recognition",
+    "medical_image_analysis":"medical image analysis",
+    "unmanned":"unmanned",
+    "unmanned_security":"unmanned security",
+    "drone":"drone",
+    "vr_ar":"VR/AR",
+    "2_d_vision":"2-D vision",
+    "2_5_d_vision":"2.5-D vision",
+    "3_d_reconstruction":"3-D reconstruction",
+    "image_processing":"image processing",
+    "video_processing":"video processing",
+    "visual_input_system":"visual input system",
+    "speech_coding":"speech coding",
+    "speech_enhancement":"speech enhancement",
+    "speech_synthesis":"speech synthesis"
+};
+
+// Resolve a category key to its localised label, falling back to the raw key
+// when no entry exists.
+function getCategoryDesc(isZh,key){
+    var re = key;
+    if(isZh){
+        re = categoryDesc[key];
+    }else{
+        re = categoryENDesc[key];
+    }
+    if(isEmpty(re)){
+        return key;
+    }
+    return re;
+}
+
+// Same lookup for task keys.
+function getTaskDesc(isZh,key){
+    var re = key;
+    if(isZh){
+        re = taskDesc[key];
+    }else{
+        re = taskENDesc[key];
+    }
+    if(isEmpty(re)){
+        return key;
+    }
+    return re;
+}
+
+// Return "active " when the given sort option matches the current sort state,
+// so the matching sort link can be highlighted.
+function getActiveItem(sort_type){
+    console.log("currentSearchSortBy=" + currentSearchSortBy + " sort_type=" + sortBy[sort_type]);
+    if(currentSearchSortBy == sortBy[sort_type] && currentSearchAscending == sortAscending[sort_type]){
+        return "active ";
+    }else{
+        return "";
+    }
+}
"; + if(!isEmpty(recordMap["file_name"])){ + html += "" + recordMap["file_name"] + "
"; + } + html +=""; + html +=" "+ getLabel(isZh,"search_lasted_update") + " " + recordMap["updated_html"]; + html +="
"; + html +="" + recordMap["description"] + "
"; + html +=""; + if(!isEmpty(recordMap["location"]) && recordMap["location"] != "null"){ + html +=" " + recordMap["location"]; + } + html +=" "; + if(!isEmpty(recordMap["website"]) && recordMap["website"] != "null"){ + html +=" " + "" + recordMap["website"] + ""; + } + html +=" "+ getLabel(isZh,"search_add_by") + " "; + html += recordMap["add_time"] + html +="
"; + html +="" + recordMap["description"] + "
"; + html +=""; + if(!isEmpty(recordMap["email"]) && recordMap["email"] != "null"){ + html +=" " + recordMap["email"] + ""; + } + html +=" "+ getLabel(isZh,"search_add_by") + " "; + html += recordMap["add_time"] + html +="
"; + html +="" + recordMap["content"] + "
"; + html +=""; + html +=" "; + html +=" " + addBlank(recordMap["repoUrl"]) +" #" + recordMap["index"] + " "; + html +=" "; + if(recordMap["is_closed"] != null && (!(recordMap["is_closed"]) || recordMap["is_closed"]=="f")){ + html += getLabel(isZh,"search_open"); + }else{ + html += getLabel(isZh,"search_closed"); + } + html +=" " + recordMap["num_comments"]; + + html +=" "+ getLabel(isZh,"search_lasted_update") + " "+ recordMap["updated_html"]; + + html +="
"; + html +="" + recordMap["description"] + "
"; + html += ""; + html +=" " + recordMap["num_watches"] + " " + recordMap["num_stars"] + " " + recordMap["num_forks"] +" "; + html +=" "+ getLabel(isZh,"search_lasted_update") + " " + recordMap["updated_html"]; + if(!isEmpty(recordMap["lang"])){ + var lang = recordMap["lang"] + var tmpLang = recordMap["lang"].split(","); + if(tmpLang.length>0){ + lang = tmpLang[0] + } + var backColor = "#3572A5"; + if(LanguagesColor[lang] != null){ + backColor = LanguagesColor[lang]; + } + html +=" " + lang + ""; + } + html +="
"; + html +="" + tip + "
" + $tip.stop(true).prop('class', 'alert alert-' + type).html(html).fadeIn(500).delay(2000).fadeOut(500); + } + + function setPage(currentPage){ + console.log("totalPage=" + totalPage); + var html =""; + console.log("currentPage=" + currentPage); + console.log("privateTotal=" + privateTotal); + // if(totalPage==0){ + // return; + // } + html += "" + getLabel(isZh,"search_input_total") + " " + totalNum + " " + getLabel(isZh,"search_srtip") + "" + if(currentPage > 1){ + html += ""; + html += ""; + }else{ + html += ""; + html += ""; + } + + for(var i=startIndex; i <= endIndex; i++){ + var page_i = i; + if(page_i > totalPage){ + break; + } + if( page_i == currentPage){ + html += "" + page_i + ""; + }else{ + html += "" + page_i + ""; + } + } + + if(currentPage >=totalPage){ + html += ""; + html += ""; + }else{ + html += ""; + html += ""; + } + + html +="