| @@ -74,12 +74,10 @@ pipeline: | |||
| commands: | |||
| - make clean | |||
| - make generate | |||
| - make vet | |||
| - make lint | |||
| - make fmt-check | |||
| - make golangci-lint | |||
| - make revive | |||
| - make swagger-check | |||
| - make swagger-validate | |||
| - make misspell-check | |||
| - make test-vendor | |||
| - make build | |||
| when: | |||
| @@ -0,0 +1,97 @@ | |||
| linters: | |||
| enable: | |||
| - gosimple | |||
| - deadcode | |||
| - typecheck | |||
| - govet | |||
| - errcheck | |||
| - staticcheck | |||
| - unused | |||
| - structcheck | |||
| - varcheck | |||
| - golint | |||
| - dupl | |||
| #- gocyclo # The cyclomatic complexity of a lot of functions is too high; we should refactor those another time. | |||
| - gofmt | |||
| - misspell | |||
| - gocritic | |||
| enable-all: false | |||
| disable-all: true | |||
| fast: false | |||
| linters-settings: | |||
| gocritic: | |||
| disabled-checks: | |||
| - ifElseChain | |||
| - singleCaseSwitch # Every time this occurred in the code, there was no other way. | |||
| issues: | |||
| exclude-rules: | |||
| # Exclude some linters from running on test files. | |||
| - path: _test\.go | |||
| linters: | |||
| - gocyclo | |||
| - errcheck | |||
| - dupl | |||
| - gosec | |||
| - unparam | |||
| - staticcheck | |||
| - path: models/migrations/v | |||
| linters: | |||
| - gocyclo | |||
| - errcheck | |||
| - dupl | |||
| - gosec | |||
| - linters: | |||
| - dupl | |||
| text: "webhook" | |||
| - linters: | |||
| - gocritic | |||
| text: "`ID' should not be capitalized" | |||
| - path: modules/templates/helper.go | |||
| linters: | |||
| - gocritic | |||
| - linters: | |||
| - unused | |||
| - deadcode | |||
| text: "swagger" | |||
| - path: contrib/pr/checkout.go | |||
| linters: | |||
| - errcheck | |||
| - path: models/issue.go | |||
| linters: | |||
| - errcheck | |||
| - path: models/migrations/ | |||
| linters: | |||
| - errcheck | |||
| - path: modules/log/ | |||
| linters: | |||
| - errcheck | |||
| - path: routers/routes/routes.go | |||
| linters: | |||
| - dupl | |||
| - path: routers/repo/view.go | |||
| linters: | |||
| - dupl | |||
| - path: models/migrations/ | |||
| linters: | |||
| - unused | |||
| - linters: | |||
| - staticcheck | |||
| text: "argument x is overwritten before first use" | |||
| - path: modules/httplib/httplib.go | |||
| linters: | |||
| - staticcheck | |||
| # Enabling this would require refactoring the methods and how they are called. | |||
| - path: models/issue_comment_list.go | |||
| linters: | |||
| - dupl | |||
| # "Destroy" is misspelled in github.com/go-macaron/session/session.go:213 so it's not our responsability to fix it | |||
| - path: modules/session/virtual.go | |||
| linters: | |||
| - misspell | |||
| text: '`Destory` is a misspelling of `Destroy`' | |||
| - path: modules/session/memory.go | |||
| linters: | |||
| - misspell | |||
| text: '`Destory` is a misspelling of `Destroy`' | |||
| @@ -135,6 +135,10 @@ errcheck: | |||
| .PHONY: lint | |||
| lint: | |||
| @echo 'make lint is deprecated. Use "make revive" if you want to use the old lint tool, or "make golangci-lint" to run a complete code check.' | |||
| .PHONY: revive | |||
| revive: | |||
| @hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ | |||
| $(GO) get -u github.com/mgechev/revive; \ | |||
| fi | |||
| @@ -461,3 +465,10 @@ generate-images: | |||
| .PHONY: pr | |||
| pr: | |||
| $(GO) run contrib/pr/checkout.go $(PR) | |||
| .PHONY: golangci-lint | |||
| golangci-lint: | |||
| @hash golangci-lint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ | |||
| curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.16.0; \ | |||
| fi | |||
| golangci-lint run | |||
| @@ -481,7 +481,7 @@ func runUpdateOauth(c *cli.Context) error { | |||
| } | |||
| // update custom URL mapping | |||
| var customURLMapping *oauth2.CustomURLMapping | |||
| var customURLMapping = &oauth2.CustomURLMapping{} | |||
| if oAuth2Config.CustomURLMapping != nil { | |||
| customURLMapping.TokenURL = oAuth2Config.CustomURLMapping.TokenURL | |||
| @@ -170,17 +170,28 @@ func runCert(c *cli.Context) error { | |||
| if err != nil { | |||
| log.Fatalf("Failed to open cert.pem for writing: %v", err) | |||
| } | |||
| pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) | |||
| certOut.Close() | |||
| err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) | |||
| if err != nil { | |||
| log.Fatalf("Failed to encode certificate: %v", err) | |||
| } | |||
| err = certOut.Close() | |||
| if err != nil { | |||
| log.Fatalf("Failed to write cert: %v", err) | |||
| } | |||
| log.Println("Written cert.pem") | |||
| keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) | |||
| if err != nil { | |||
| log.Fatalf("Failed to open key.pem for writing: %v", err) | |||
| } | |||
| pem.Encode(keyOut, pemBlockForKey(priv)) | |||
| keyOut.Close() | |||
| err = pem.Encode(keyOut, pemBlockForKey(priv)) | |||
| if err != nil { | |||
| log.Fatalf("Failed to encode key: %v", err) | |||
| } | |||
| err = keyOut.Close() | |||
| if err != nil { | |||
| log.Fatalf("Failed to write key: %v", err) | |||
| } | |||
| log.Println("Written key.pem") | |||
| return nil | |||
| } | |||
| @@ -30,7 +30,6 @@ import ( | |||
| ) | |||
| const ( | |||
| accessDenied = "Repository does not exist or you do not have access" | |||
| lfsAuthenticateVerb = "git-lfs-authenticate" | |||
| ) | |||
| @@ -67,7 +66,7 @@ func checkLFSVersion() { | |||
| } | |||
| func setup(logPath string) { | |||
| log.DelLogger("console") | |||
| _ = log.DelLogger("console") | |||
| setting.NewContext() | |||
| checkLFSVersion() | |||
| } | |||
| @@ -112,7 +111,9 @@ func runServ(c *cli.Context) error { | |||
| } | |||
| if len(c.Args()) < 1 { | |||
| cli.ShowSubcommandHelp(c) | |||
| if err := cli.ShowSubcommandHelp(c); err != nil { | |||
| fmt.Printf("error showing subcommand help: %v\n", err) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -178,11 +178,16 @@ func runWeb(ctx *cli.Context) error { | |||
| } | |||
| err = runHTTPS(listenAddr, setting.CertFile, setting.KeyFile, context2.ClearHandler(m)) | |||
| case setting.FCGI: | |||
| listener, err := net.Listen("tcp", listenAddr) | |||
| var listener net.Listener | |||
| listener, err = net.Listen("tcp", listenAddr) | |||
| if err != nil { | |||
| log.Fatal("Failed to bind %s: %v", listenAddr, err) | |||
| } | |||
| defer listener.Close() | |||
| defer func() { | |||
| if err := listener.Close(); err != nil { | |||
| log.Fatal("Failed to stop server: %v", err) | |||
| } | |||
| }() | |||
| err = fcgi.Serve(listener, context2.ClearHandler(m)) | |||
| case setting.UnixSocket: | |||
| if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) { | |||
| @@ -91,8 +91,7 @@ func runPR() { | |||
| routers.NewServices() | |||
| //x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared") | |||
| var helper testfixtures.Helper | |||
| helper = &testfixtures.SQLite{} | |||
| var helper testfixtures.Helper = &testfixtures.SQLite{} | |||
| models.NewEngine(func(_ *xorm.Engine) error { | |||
| return nil | |||
| }) | |||
| @@ -62,7 +62,7 @@ func branchAction(t *testing.T, button string) (*HTMLDoc, string) { | |||
| req = NewRequestWithValues(t, "POST", link, map[string]string{ | |||
| "_csrf": getCsrf(t, htmlDoc.doc), | |||
| }) | |||
| resp = session.MakeRequest(t, req, http.StatusOK) | |||
| session.MakeRequest(t, req, http.StatusOK) | |||
| url, err := url.Parse(link) | |||
| assert.NoError(t, err) | |||
| @@ -34,7 +34,7 @@ func TestCreateFile(t *testing.T) { | |||
| "content": "Content", | |||
| "commit_choice": "direct", | |||
| }) | |||
| resp = session.MakeRequest(t, req, http.StatusFound) | |||
| session.MakeRequest(t, req, http.StatusFound) | |||
| }) | |||
| } | |||
| @@ -48,7 +48,7 @@ func TestCreateFileOnProtectedBranch(t *testing.T) { | |||
| "_csrf": csrf, | |||
| "protected": "on", | |||
| }) | |||
| resp := session.MakeRequest(t, req, http.StatusFound) | |||
| session.MakeRequest(t, req, http.StatusFound) | |||
| // Check if master branch has been locked successfully | |||
| flashCookie := session.GetCookie("macaron_flash") | |||
| assert.NotNil(t, flashCookie) | |||
| @@ -56,7 +56,7 @@ func TestCreateFileOnProtectedBranch(t *testing.T) { | |||
| // Request editor page | |||
| req = NewRequest(t, "GET", "/user2/repo1/_new/master/") | |||
| resp = session.MakeRequest(t, req, http.StatusOK) | |||
| resp := session.MakeRequest(t, req, http.StatusOK) | |||
| doc := NewHTMLParser(t, resp.Body) | |||
| lastCommit := doc.GetInputValueByName("last_commit") | |||
| @@ -42,7 +42,7 @@ type NilResponseRecorder struct { | |||
| } | |||
| func (n *NilResponseRecorder) Write(b []byte) (int, error) { | |||
| n.Length = n.Length + len(b) | |||
| n.Length += len(b) | |||
| return len(b), nil | |||
| } | |||
| @@ -141,8 +141,7 @@ func initIntegrationTest() { | |||
| if err != nil { | |||
| log.Fatalf("sql.Open: %v", err) | |||
| } | |||
| rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", | |||
| models.DbCfg.Name)) | |||
| rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", models.DbCfg.Name)) | |||
| if err != nil { | |||
| log.Fatalf("db.Query: %v", err) | |||
| } | |||
| @@ -210,7 +209,7 @@ func (s *TestSession) MakeRequest(t testing.TB, req *http.Request, expectedStatu | |||
| resp := MakeRequest(t, req, expectedStatus) | |||
| ch := http.Header{} | |||
| ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||
| ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||
| cr := http.Request{Header: ch} | |||
| s.jar.SetCookies(baseURL, cr.Cookies()) | |||
| @@ -226,7 +225,7 @@ func (s *TestSession) MakeRequestNilResponseRecorder(t testing.TB, req *http.Req | |||
| resp := MakeRequestNilResponseRecorder(t, req, expectedStatus) | |||
| ch := http.Header{} | |||
| ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||
| ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||
| cr := http.Request{Header: ch} | |||
| s.jar.SetCookies(baseURL, cr.Cookies()) | |||
| @@ -266,7 +265,7 @@ func loginUserWithPassword(t testing.TB, userName, password string) *TestSession | |||
| resp = MakeRequest(t, req, http.StatusFound) | |||
| ch := http.Header{} | |||
| ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||
| ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||
| cr := http.Request{Header: ch} | |||
| session := emptyTestSession(t) | |||
| @@ -45,7 +45,7 @@ func storeObjectInRepo(t *testing.T, repositoryID int64, content *[]byte) string | |||
| lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID} | |||
| } | |||
| lfsID = lfsID + 1 | |||
| lfsID++ | |||
| lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject) | |||
| assert.NoError(t, err) | |||
| contentStore := &lfs.ContentStore{BasePath: setting.LFS.ContentPath} | |||
| @@ -57,21 +57,6 @@ func initMigrationTest(t *testing.T) { | |||
| setting.NewLogServices(true) | |||
| } | |||
| func getDialect() string { | |||
| dialect := "sqlite" | |||
| switch { | |||
| case setting.UseSQLite3: | |||
| dialect = "sqlite" | |||
| case setting.UseMySQL: | |||
| dialect = "mysql" | |||
| case setting.UsePostgreSQL: | |||
| dialect = "pgsql" | |||
| case setting.UseMSSQL: | |||
| dialect = "mssql" | |||
| } | |||
| return dialect | |||
| } | |||
| func availableVersions() ([]string, error) { | |||
| migrationsDir, err := os.Open("integrations/migration-test") | |||
| if err != nil { | |||
| @@ -73,7 +73,7 @@ func PrintCurrentTest(t testing.TB, skip ...int) { | |||
| _, filename, line, _ := runtime.Caller(actualSkip) | |||
| if log.CanColorStdout { | |||
| fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", log.NewColoredValue(t.Name()), strings.TrimPrefix(filename, prefix), line) | |||
| fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line) | |||
| } else { | |||
| fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line) | |||
| } | |||
| @@ -42,7 +42,7 @@ var ( | |||
| func init() { | |||
| setting.AppVer = Version | |||
| setting.AppBuiltWith = formatBuiltWith(Tags) | |||
| setting.AppBuiltWith = formatBuiltWith() | |||
| // Grab the original help templates | |||
| originalAppHelpTemplate = cli.AppHelpTemplate | |||
| @@ -56,7 +56,7 @@ func main() { | |||
| app.Usage = "A painless self-hosted Git service" | |||
| app.Description = `By default, gitea will start serving using the webserver with no | |||
| arguments - which can alternatively be run by running the subcommand web.` | |||
| app.Version = Version + formatBuiltWith(Tags) | |||
| app.Version = Version + formatBuiltWith() | |||
| app.Commands = []cli.Command{ | |||
| cmd.CmdWeb, | |||
| cmd.CmdServ, | |||
| @@ -179,7 +179,7 @@ DEFAULT CONFIGURATION: | |||
| `, originalTemplate, setting.CustomPath, overrided, setting.CustomConf, setting.AppPath, setting.AppWorkPath) | |||
| } | |||
| func formatBuiltWith(makeTags string) string { | |||
| func formatBuiltWith() string { | |||
| var version = runtime.Version() | |||
| if len(MakeVersion) > 0 { | |||
| version = MakeVersion + ", " + runtime.Version() | |||
| @@ -10,13 +10,6 @@ import ( | |||
| "github.com/stretchr/testify/assert" | |||
| ) | |||
| var accessModes = []AccessMode{ | |||
| AccessModeRead, | |||
| AccessModeWrite, | |||
| AccessModeAdmin, | |||
| AccessModeOwner, | |||
| } | |||
| func TestAccessLevel(t *testing.T) { | |||
| assert.NoError(t, PrepareTestDatabase()) | |||
| @@ -126,14 +126,14 @@ func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest) | |||
| } | |||
| // GetProtectedBranchByRepoID getting protected branch by repo ID | |||
| func GetProtectedBranchByRepoID(RepoID int64) ([]*ProtectedBranch, error) { | |||
| func GetProtectedBranchByRepoID(repoID int64) ([]*ProtectedBranch, error) { | |||
| protectedBranches := make([]*ProtectedBranch, 0) | |||
| return protectedBranches, x.Where("repo_id = ?", RepoID).Desc("updated_unix").Find(&protectedBranches) | |||
| return protectedBranches, x.Where("repo_id = ?", repoID).Desc("updated_unix").Find(&protectedBranches) | |||
| } | |||
| // GetProtectedBranchBy getting protected branch by ID/Name | |||
| func GetProtectedBranchBy(repoID int64, BranchName string) (*ProtectedBranch, error) { | |||
| rel := &ProtectedBranch{RepoID: repoID, BranchName: BranchName} | |||
| func GetProtectedBranchBy(repoID int64, branchName string) (*ProtectedBranch, error) { | |||
| rel := &ProtectedBranch{RepoID: repoID, BranchName: branchName} | |||
| has, err := x.Get(rel) | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -40,7 +40,7 @@ func (r *BlameReader) NextPart() (*BlamePart, error) { | |||
| scanner := r.scanner | |||
| if r.lastSha != nil { | |||
| blamePart = &BlamePart{*r.lastSha, make([]string, 0, 0)} | |||
| blamePart = &BlamePart{*r.lastSha, make([]string, 0)} | |||
| } | |||
| for scanner.Scan() { | |||
| @@ -56,7 +56,7 @@ func (r *BlameReader) NextPart() (*BlamePart, error) { | |||
| sha1 := lines[1] | |||
| if blamePart == nil { | |||
| blamePart = &BlamePart{sha1, make([]string, 0, 0)} | |||
| blamePart = &BlamePart{sha1, make([]string, 0)} | |||
| } | |||
| if blamePart.Sha != sha1 { | |||
| @@ -384,13 +384,9 @@ func CutDiffAroundLine(originalDiff io.Reader, line int64, old bool, numbersOfLi | |||
| // headers + hunk header | |||
| newHunk := make([]string, headerLines) | |||
| // transfer existing headers | |||
| for idx, lof := range hunk[:headerLines] { | |||
| newHunk[idx] = lof | |||
| } | |||
| copy(newHunk, hunk[:headerLines]) | |||
| // transfer last n lines | |||
| for _, lof := range hunk[len(hunk)-numbersOfLine-1:] { | |||
| newHunk = append(newHunk, lof) | |||
| } | |||
| newHunk = append(newHunk, hunk[len(hunk)-numbersOfLine-1:]...) | |||
| // calculate newBegin, ... by counting lines | |||
| for i := len(hunk) - 1; i >= len(hunk)-numbersOfLine; i-- { | |||
| switch hunk[i][0] { | |||
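As an aside for readers less familiar with these built-ins: the two removed loops above are replaced by `copy` and a variadic `append`, which perform the same element transfers in a single call each. A standalone illustration with made-up values (not taken from the diff):

```go
package main

import "fmt"

func main() {
	hunk := []string{"--- a/file", "+++ b/file", "@@ -1,3 +1,3 @@", " ctx", "-old", "+new"}
	headerLines := 3
	numbersOfLine := 2

	// copy(dst, src) replaces the index-by-index transfer loop.
	newHunk := make([]string, headerLines)
	copy(newHunk, hunk[:headerLines])

	// append(dst, src...) replaces the element-by-element append loop.
	newHunk = append(newHunk, hunk[len(hunk)-numbersOfLine-1:]...)

	// Prints the three header lines followed by the last three hunk lines.
	fmt.Println(newHunk)
}
```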
| @@ -582,7 +578,10 @@ func ParsePatch(maxLines, maxLineCharacters, maxFiles int, reader io.Reader) (*D | |||
| diff.Files = append(diff.Files, curFile) | |||
| if len(diff.Files) >= maxFiles { | |||
| diff.IsIncomplete = true | |||
| io.Copy(ioutil.Discard, reader) | |||
| _, err := io.Copy(ioutil.Discard, reader) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("Copy: %v", err) | |||
| } | |||
| break | |||
| } | |||
| curFileLinesCount = 0 | |||
| @@ -17,12 +17,6 @@ func assertEqual(t *testing.T, s1 string, s2 template.HTML) { | |||
| } | |||
| } | |||
| func assertLineEqual(t *testing.T, d1 *DiffLine, d2 *DiffLine) { | |||
| if d1 != d2 { | |||
| t.Errorf("%v should be equal %v", d1, d2) | |||
| } | |||
| } | |||
| func TestDiffToHTML(t *testing.T) { | |||
| assertEqual(t, "+foo <span class=\"added-code\">bar</span> biz", diffToHTML([]dmp.Diff{ | |||
| {Type: dmp.DiffEqual, Text: "foo "}, | |||
| @@ -1330,7 +1330,7 @@ func sortIssuesSession(sess *xorm.Session, sortType string) { | |||
| } | |||
| } | |||
| func (opts *IssuesOptions) setupSession(sess *xorm.Session) error { | |||
| func (opts *IssuesOptions) setupSession(sess *xorm.Session) { | |||
| if opts.Page >= 0 && opts.PageSize > 0 { | |||
| var start int | |||
| if opts.Page == 0 { | |||
| @@ -1389,7 +1389,6 @@ func (opts *IssuesOptions) setupSession(sess *xorm.Session) error { | |||
| fmt.Sprintf("issue.id = il%[1]d.issue_id AND il%[1]d.label_id = %[2]d", i, labelID)) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // CountIssuesByRepo map from repoID to number of issues matching the options | |||
| @@ -1397,9 +1396,7 @@ func CountIssuesByRepo(opts *IssuesOptions) (map[int64]int64, error) { | |||
| sess := x.NewSession() | |||
| defer sess.Close() | |||
| if err := opts.setupSession(sess); err != nil { | |||
| return nil, err | |||
| } | |||
| opts.setupSession(sess) | |||
| countsSlice := make([]*struct { | |||
| RepoID int64 | |||
| @@ -1424,9 +1421,7 @@ func Issues(opts *IssuesOptions) ([]*Issue, error) { | |||
| sess := x.NewSession() | |||
| defer sess.Close() | |||
| if err := opts.setupSession(sess); err != nil { | |||
| return nil, err | |||
| } | |||
| opts.setupSession(sess) | |||
| sortIssuesSession(sess, opts.SortType) | |||
| issues := make([]*Issue, 0, setting.UI.IssuePagingNum) | |||
| @@ -171,17 +171,6 @@ func (c *Comment) loadPoster(e Engine) (err error) { | |||
| return err | |||
| } | |||
| func (c *Comment) loadAttachments(e Engine) (err error) { | |||
| if len(c.Attachments) > 0 { | |||
| return | |||
| } | |||
| c.Attachments, err = getAttachmentsByCommentID(e, c.ID) | |||
| if err != nil { | |||
| log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err) | |||
| } | |||
| return err | |||
| } | |||
| // AfterDelete is invoked from XORM after the object is deleted. | |||
| func (c *Comment) AfterDelete() { | |||
| if c.ID <= 0 { | |||
| @@ -463,7 +452,7 @@ func (c *Comment) LoadReview() error { | |||
| return c.loadReview(x) | |||
| } | |||
| func (c *Comment) checkInvalidation(e Engine, doer *User, repo *git.Repository, branch string) error { | |||
| func (c *Comment) checkInvalidation(doer *User, repo *git.Repository, branch string) error { | |||
| // FIXME differentiate between previous and proposed line | |||
| commit, err := repo.LineBlame(branch, repo.Path, c.TreePath, uint(c.UnsignedLine())) | |||
| if err != nil { | |||
| @@ -479,7 +468,7 @@ func (c *Comment) checkInvalidation(e Engine, doer *User, repo *git.Repository, | |||
| // CheckInvalidation checks if the line of code comment got changed by another commit. | |||
| // If the line got changed the comment is going to be invalidated. | |||
| func (c *Comment) CheckInvalidation(repo *git.Repository, doer *User, branch string) error { | |||
| return c.checkInvalidation(x, doer, repo, branch) | |||
| return c.checkInvalidation(doer, repo, branch) | |||
| } | |||
| // DiffSide returns "previous" if Comment.Line is a LOC of the previous changes and "proposed" if it is a LOC of the proposed changes. | |||
| @@ -915,7 +904,7 @@ func CreateCodeComment(doer *User, repo *Repository, issue *Issue, content, tree | |||
| commit, err := gitRepo.LineBlame(pr.GetGitRefName(), gitRepo.Path, treePath, uint(line)) | |||
| if err == nil { | |||
| commitID = commit.ID.String() | |||
| } else if err != nil && !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") { | |||
| } else if !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") { | |||
| return nil, fmt.Errorf("LineBlame[%s, %s, %s, %d]: %v", pr.GetGitRefName(), gitRepo.Path, treePath, line, err) | |||
| } | |||
| } | |||
| @@ -36,7 +36,7 @@ func (comments CommentList) loadPosters(e Engine) error { | |||
| if err != nil { | |||
| return err | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| posterIDs = posterIDs[limit:] | |||
| } | |||
| @@ -94,13 +94,13 @@ func (comments CommentList) loadLabels(e Engine) error { | |||
| var label Label | |||
| err = rows.Scan(&label) | |||
| if err != nil { | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| return err | |||
| } | |||
| commentLabels[label.ID] = &label | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| _ = rows.Close() | |||
| left -= limit | |||
| labelIDs = labelIDs[limit:] | |||
| } | |||
| @@ -143,7 +143,7 @@ func (comments CommentList) loadMilestones(e Engine) error { | |||
| if err != nil { | |||
| return err | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| milestoneIDs = milestoneIDs[limit:] | |||
| } | |||
| @@ -186,7 +186,7 @@ func (comments CommentList) loadOldMilestones(e Engine) error { | |||
| if err != nil { | |||
| return err | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| milestoneIDs = milestoneIDs[limit:] | |||
| } | |||
| @@ -236,9 +236,9 @@ func (comments CommentList) loadAssignees(e Engine) error { | |||
| assignees[user.ID] = &user | |||
| } | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| left = left - limit | |||
| left -= limit | |||
| assigneeIDs = assigneeIDs[limit:] | |||
| } | |||
| @@ -310,9 +310,9 @@ func (comments CommentList) loadIssues(e Engine) error { | |||
| issues[issue.ID] = &issue | |||
| } | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| left = left - limit | |||
| left -= limit | |||
| issueIDs = issueIDs[limit:] | |||
| } | |||
| @@ -361,15 +361,15 @@ func (comments CommentList) loadDependentIssues(e Engine) error { | |||
| var issue Issue | |||
| err = rows.Scan(&issue) | |||
| if err != nil { | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| return err | |||
| } | |||
| issues[issue.ID] = &issue | |||
| } | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| left = left - limit | |||
| left -= limit | |||
| issueIDs = issueIDs[limit:] | |||
| } | |||
| @@ -406,14 +406,14 @@ func (comments CommentList) loadAttachments(e Engine) (err error) { | |||
| var attachment Attachment | |||
| err = rows.Scan(&attachment) | |||
| if err != nil { | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| return err | |||
| } | |||
| attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment) | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| _ = rows.Close() | |||
| left -= limit | |||
| commentsIDs = commentsIDs[limit:] | |||
| } | |||
| @@ -457,15 +457,15 @@ func (comments CommentList) loadReviews(e Engine) error { | |||
| var review Review | |||
| err = rows.Scan(&review) | |||
| if err != nil { | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| return err | |||
| } | |||
| reviews[review.ID] = &review | |||
| } | |||
| rows.Close() | |||
| _ = rows.Close() | |||
| left = left - limit | |||
| left -= limit | |||
| reviewIDs = reviewIDs[limit:] | |||
| } | |||
| @@ -401,14 +401,6 @@ func NewIssueLabels(issue *Issue, labels []*Label, doer *User) (err error) { | |||
| return sess.Commit() | |||
| } | |||
| func getIssueLabels(e Engine, issueID int64) ([]*IssueLabel, error) { | |||
| issueLabels := make([]*IssueLabel, 0, 10) | |||
| return issueLabels, e. | |||
| Where("issue_id=?", issueID). | |||
| Asc("label_id"). | |||
| Find(&issueLabels) | |||
| } | |||
| func deleteIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (err error) { | |||
| if count, err := e.Delete(&IssueLabel{ | |||
| IssueID: issue.ID, | |||
| @@ -7,6 +7,8 @@ package models | |||
| import ( | |||
| "fmt" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "github.com/go-xorm/builder" | |||
| ) | |||
| @@ -47,7 +49,7 @@ func (issues IssueList) loadRepositories(e Engine) ([]*Repository, error) { | |||
| if err != nil { | |||
| return nil, fmt.Errorf("find repository: %v", err) | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| repoIDs = repoIDs[limit:] | |||
| } | |||
| @@ -91,7 +93,7 @@ func (issues IssueList) loadPosters(e Engine) error { | |||
| if err != nil { | |||
| return err | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| posterIDs = posterIDs[limit:] | |||
| } | |||
| @@ -146,13 +148,21 @@ func (issues IssueList) loadLabels(e Engine) error { | |||
| var labelIssue LabelIssue | |||
| err = rows.Scan(&labelIssue) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadLabels: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label) | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadLabels: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| issueIDs = issueIDs[limit:] | |||
| } | |||
| @@ -191,7 +201,7 @@ func (issues IssueList) loadMilestones(e Engine) error { | |||
| if err != nil { | |||
| return err | |||
| } | |||
| left = left - limit | |||
| left -= limit | |||
| milestoneIDs = milestoneIDs[limit:] | |||
| } | |||
| @@ -231,15 +241,22 @@ func (issues IssueList) loadAssignees(e Engine) error { | |||
| var assigneeIssue AssigneeIssue | |||
| err = rows.Scan(&assigneeIssue) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadAssignees: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee) | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadAssignees: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| issueIDs = issueIDs[limit:] | |||
| } | |||
| @@ -283,14 +300,21 @@ func (issues IssueList) loadPullRequests(e Engine) error { | |||
| var pr PullRequest | |||
| err = rows.Scan(&pr) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadPullRequests: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| pullRequestMaps[pr.IssueID] = &pr | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadPullRequests: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| issuesIDs = issuesIDs[limit:] | |||
| } | |||
| @@ -325,14 +349,21 @@ func (issues IssueList) loadAttachments(e Engine) (err error) { | |||
| var attachment Attachment | |||
| err = rows.Scan(&attachment) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadAttachments: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadAttachments: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| issuesIDs = issuesIDs[limit:] | |||
| } | |||
| @@ -368,13 +399,21 @@ func (issues IssueList) loadComments(e Engine, cond builder.Cond) (err error) { | |||
| var comment Comment | |||
| err = rows.Scan(&comment) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadComments: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| comments[comment.IssueID] = append(comments[comment.IssueID], &comment) | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadComments: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| issuesIDs = issuesIDs[limit:] | |||
| } | |||
| @@ -422,13 +461,21 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) { | |||
| var totalTime totalTimesByIssue | |||
| err = rows.Scan(&totalTime) | |||
| if err != nil { | |||
| rows.Close() | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| trackedTimes[totalTime.IssueID] = totalTime.Time | |||
| } | |||
| rows.Close() | |||
| left = left - limit | |||
| // When there are no rows left and we try to close it, xorm will complain with an error. | |||
| // Since that is not relevant for us, we can safely ignore it. | |||
| if err := rows.Close(); err != nil { | |||
| log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) | |||
| } | |||
| left -= limit | |||
| ids = ids[limit:] | |||
| } | |||
| @@ -439,33 +486,33 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) { | |||
| } | |||
| // loadAttributes loads all attributes, except for attachments and comments | |||
| func (issues IssueList) loadAttributes(e Engine) (err error) { | |||
| if _, err = issues.loadRepositories(e); err != nil { | |||
| return | |||
| func (issues IssueList) loadAttributes(e Engine) error { | |||
| if _, err := issues.loadRepositories(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadRepositories: %v", err) | |||
| } | |||
| if err = issues.loadPosters(e); err != nil { | |||
| return | |||
| if err := issues.loadPosters(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadPosters: %v", err) | |||
| } | |||
| if err = issues.loadLabels(e); err != nil { | |||
| return | |||
| if err := issues.loadLabels(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadLabels: %v", err) | |||
| } | |||
| if err = issues.loadMilestones(e); err != nil { | |||
| return | |||
| if err := issues.loadMilestones(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadMilestones: %v", err) | |||
| } | |||
| if err = issues.loadAssignees(e); err != nil { | |||
| return | |||
| if err := issues.loadAssignees(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadAssignees: %v", err) | |||
| } | |||
| if err = issues.loadPullRequests(e); err != nil { | |||
| return | |||
| if err := issues.loadPullRequests(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadPullRequests: %v", err) | |||
| } | |||
| if err = issues.loadTotalTrackedTimes(e); err != nil { | |||
| return | |||
| if err := issues.loadTotalTrackedTimes(e); err != nil { | |||
| return fmt.Errorf("issue.loadAttributes: loadTotalTrackedTimes: %v", err) | |||
| } | |||
| return nil | |||
| @@ -15,7 +15,6 @@ import ( | |||
| // XORMLogBridge a logger bridge from Logger to xorm | |||
| type XORMLogBridge struct { | |||
| showSQL bool | |||
| level core.LogLevel | |||
| logger *log.Logger | |||
| } | |||
| @@ -34,42 +33,42 @@ func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...inter | |||
| // Debug show debug log | |||
| func (l *XORMLogBridge) Debug(v ...interface{}) { | |||
| l.Log(2, log.DEBUG, fmt.Sprint(v...)) | |||
| _ = l.Log(2, log.DEBUG, fmt.Sprint(v...)) | |||
| } | |||
| // Debugf show debug log | |||
| func (l *XORMLogBridge) Debugf(format string, v ...interface{}) { | |||
| l.Log(2, log.DEBUG, format, v...) | |||
| _ = l.Log(2, log.DEBUG, format, v...) | |||
| } | |||
| // Error show error log | |||
| func (l *XORMLogBridge) Error(v ...interface{}) { | |||
| l.Log(2, log.ERROR, fmt.Sprint(v...)) | |||
| _ = l.Log(2, log.ERROR, fmt.Sprint(v...)) | |||
| } | |||
| // Errorf show error log | |||
| func (l *XORMLogBridge) Errorf(format string, v ...interface{}) { | |||
| l.Log(2, log.ERROR, format, v...) | |||
| _ = l.Log(2, log.ERROR, format, v...) | |||
| } | |||
| // Info show information level log | |||
| func (l *XORMLogBridge) Info(v ...interface{}) { | |||
| l.Log(2, log.INFO, fmt.Sprint(v...)) | |||
| _ = l.Log(2, log.INFO, fmt.Sprint(v...)) | |||
| } | |||
| // Infof show information level log | |||
| func (l *XORMLogBridge) Infof(format string, v ...interface{}) { | |||
| l.Log(2, log.INFO, format, v...) | |||
| _ = l.Log(2, log.INFO, format, v...) | |||
| } | |||
| // Warn show warning log | |||
| func (l *XORMLogBridge) Warn(v ...interface{}) { | |||
| l.Log(2, log.WARN, fmt.Sprint(v...)) | |||
| _ = l.Log(2, log.WARN, fmt.Sprint(v...)) | |||
| } | |||
| // Warnf show warning log | |||
| func (l *XORMLogBridge) Warnf(format string, v ...interface{}) { | |||
| l.Log(2, log.WARN, format, v...) | |||
| _ = l.Log(2, log.WARN, format, v...) | |||
| } | |||
| // Level get logger level | |||
| @@ -164,8 +164,7 @@ func Cell2Int64(val xorm.Cell) int64 { | |||
| // BeforeSet is invoked from XORM before setting the value of a field of this object. | |||
| func (source *LoginSource) BeforeSet(colName string, val xorm.Cell) { | |||
| switch colName { | |||
| case "type": | |||
| if colName == "type" { | |||
| switch LoginType(Cell2Int64(val)) { | |||
| case LoginLDAP, LoginDLDAP: | |||
| source.Cfg = new(LDAPConfig) | |||
| @@ -282,10 +281,12 @@ func CreateLoginSource(source *LoginSource) error { | |||
| oAuth2Config := source.OAuth2() | |||
| err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||
| err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | |||
| if err != nil { | |||
| // remove the LoginSource in case of errors while registering OAuth2 providers | |||
| x.Delete(source) | |||
| if _, err := x.Delete(source); err != nil { | |||
| log.Error("CreateLoginSource: Error while wrapOpenIDConnectInitializeError: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| } | |||
| return err | |||
| @@ -325,10 +326,12 @@ func UpdateSource(source *LoginSource) error { | |||
| oAuth2Config := source.OAuth2() | |||
| err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||
| err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | |||
| if err != nil { | |||
| // restore original values since we cannot update the provider itself | |||
| x.ID(source.ID).AllCols().Update(originalLoginSource) | |||
| if _, err := x.ID(source.ID).AllCols().Update(originalLoginSource); err != nil { | |||
| log.Error("UpdateSource: Error while wrapOpenIDConnectInitializeError: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| } | |||
| return err | |||
| @@ -385,7 +388,7 @@ func composeFullName(firstname, surname, username string) string { | |||
| } | |||
| var ( | |||
| alphaDashDotPattern = regexp.MustCompile("[^\\w-\\.]") | |||
| alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`) | |||
| ) | |||
| // LoginViaLDAP queries if login/password is valid against the LDAP directory pool, | |||
| @@ -401,7 +404,7 @@ func LoginViaLDAP(user *User, login, password string, source *LoginSource, autoR | |||
| if !autoRegister { | |||
| if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | |||
| RewriteAllPublicKeys() | |||
| return user, RewriteAllPublicKeys() | |||
| } | |||
| return user, nil | |||
| @@ -435,7 +438,7 @@ func LoginViaLDAP(user *User, login, password string, source *LoginSource, autoR | |||
| err := CreateUser(user) | |||
| if err == nil && isAttributeSSHPublicKeySet && addLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | |||
| RewriteAllPublicKeys() | |||
| err = RewriteAllPublicKeys() | |||
| } | |||
| return user, err | |||
| @@ -157,10 +157,13 @@ func composeTplData(subject, body, link string) map[string]interface{} { | |||
| func composeIssueCommentMessage(issue *Issue, doer *User, content string, comment *Comment, tplName base.TplName, tos []string, info string) *mailer.Message { | |||
| subject := issue.mailSubject() | |||
| issue.LoadRepo() | |||
| err := issue.LoadRepo() | |||
| if err != nil { | |||
| log.Error("LoadRepo: %v", err) | |||
| } | |||
| body := string(markup.RenderByType(markdown.MarkupName, []byte(content), issue.Repo.HTMLURL(), issue.Repo.ComposeMetas())) | |||
| data := make(map[string]interface{}, 10) | |||
| var data = make(map[string]interface{}, 10) | |||
| if comment != nil { | |||
| data = composeTplData(subject, body, issue.HTMLURL()+"#"+comment.HashTag()) | |||
| } else { | |||
| @@ -399,7 +399,7 @@ func trimCommitActionAppURLPrefix(x *xorm.Engine) error { | |||
| return fmt.Errorf("marshal action content[%d]: %v", actID, err) | |||
| } | |||
| if _, err = sess.Id(actID).Update(&Action{ | |||
| if _, err = sess.ID(actID).Update(&Action{ | |||
| Content: string(p), | |||
| }); err != nil { | |||
| return fmt.Errorf("update action[%d]: %v", actID, err) | |||
| @@ -503,7 +503,7 @@ func attachmentRefactor(x *xorm.Engine) error { | |||
| // Update database first because this is where errors happen most often. | |||
| for _, attach := range attachments { | |||
| if _, err = sess.Id(attach.ID).Update(attach); err != nil { | |||
| if _, err = sess.ID(attach.ID).Update(attach); err != nil { | |||
| return err | |||
| } | |||
| @@ -581,7 +581,7 @@ func renamePullRequestFields(x *xorm.Engine) (err error) { | |||
| if pull.Index == 0 { | |||
| continue | |||
| } | |||
| if _, err = sess.Id(pull.ID).Update(pull); err != nil { | |||
| if _, err = sess.ID(pull.ID).Update(pull); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| @@ -661,7 +661,7 @@ func generateOrgRandsAndSalt(x *xorm.Engine) (err error) { | |||
| if org.Salt, err = generate.GetRandomString(10); err != nil { | |||
| return err | |||
| } | |||
| if _, err = sess.Id(org.ID).Update(org); err != nil { | |||
| if _, err = sess.ID(org.ID).Update(org); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| @@ -58,13 +58,13 @@ func convertIntervalToDuration(x *xorm.Engine) (err error) { | |||
| return fmt.Errorf("Query repositories: %v", err) | |||
| } | |||
| for _, mirror := range mirrors { | |||
| mirror.Interval = mirror.Interval * time.Hour | |||
| mirror.Interval *= time.Hour | |||
| if mirror.Interval < setting.Mirror.MinInterval { | |||
| log.Info("Mirror interval less than Mirror.MinInterval, setting default interval: repo id %v", mirror.RepoID) | |||
| mirror.Interval = setting.Mirror.DefaultInterval | |||
| } | |||
| log.Debug("Mirror interval set to %v for repo id %v", mirror.Interval, mirror.RepoID) | |||
| _, err := sess.Id(mirror.ID).Cols("interval").Update(mirror) | |||
| _, err := sess.ID(mirror.ID).Cols("interval").Update(mirror) | |||
| if err != nil { | |||
| return fmt.Errorf("update mirror interval failed: %v", err) | |||
| } | |||
| @@ -48,6 +48,9 @@ func renameRepoIsBareToIsEmpty(x *xorm.Engine) error { | |||
| if len(indexes) >= 1 { | |||
| _, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") | |||
| if err != nil { | |||
| return fmt.Errorf("Drop index failed: %v", err) | |||
| } | |||
| } | |||
| } else { | |||
| _, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") | |||
| @@ -58,6 +58,9 @@ func hashAppToken(x *xorm.Engine) error { | |||
| if len(indexes) >= 1 { | |||
| _, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| } else { | |||
| _, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") | |||
| @@ -48,6 +48,7 @@ type Engine interface { | |||
| Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *xorm.Session | |||
| SQL(interface{}, ...interface{}) *xorm.Session | |||
| Where(interface{}, ...interface{}) *xorm.Session | |||
| Asc(colNames ...string) *xorm.Session | |||
| } | |||
| var ( | |||
| @@ -181,14 +182,14 @@ func parsePostgreSQLHostPort(info string) (string, string) { | |||
| return host, port | |||
| } | |||
| func getPostgreSQLConnectionString(DBHost, DBUser, DBPasswd, DBName, DBParam, DBSSLMode string) (connStr string) { | |||
| host, port := parsePostgreSQLHostPort(DBHost) | |||
| func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbParam, dbsslMode string) (connStr string) { | |||
| host, port := parsePostgreSQLHostPort(dbHost) | |||
| if host[0] == '/' { // looks like a unix socket | |||
| connStr = fmt.Sprintf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s", | |||
| url.PathEscape(DBUser), url.PathEscape(DBPasswd), port, DBName, DBParam, DBSSLMode, host) | |||
| url.PathEscape(dbUser), url.PathEscape(dbPasswd), port, dbName, dbParam, dbsslMode, host) | |||
| } else { | |||
| connStr = fmt.Sprintf("postgres://%s:%s@%s:%s/%s%ssslmode=%s", | |||
| url.PathEscape(DBUser), url.PathEscape(DBPasswd), host, port, DBName, DBParam, DBSSLMode) | |||
| url.PathEscape(dbUser), url.PathEscape(dbPasswd), host, port, dbName, dbParam, dbsslMode) | |||
| } | |||
| return | |||
| } | |||
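For reference, the two branches above yield differently shaped connection strings: the unix-socket case leaves the URL authority's host empty and passes the socket directory via a `host=` query parameter, while the TCP case puts host and port in the authority. A small sketch with illustrative values (none of these inputs are Gitea defaults):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	user, passwd, name, param, sslMode := "gitea", "s3cret/", "giteadb", "?", "disable"

	// Unix-socket shape: empty host in the authority, socket directory via host=.
	port, socketDir := "5432", "/var/run/postgresql"
	fmt.Printf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s\n",
		url.PathEscape(user), url.PathEscape(passwd), port, name, param, sslMode, socketDir)

	// TCP shape: host and port directly in the authority.
	host := "127.0.0.1"
	fmt.Printf("postgres://%s:%s@%s:%s/%s%ssslmode=%s\n",
		url.PathEscape(user), url.PathEscape(passwd), host, port, name, param, sslMode)
}
```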
| @@ -119,7 +119,10 @@ func createOrUpdateIssueNotifications(e Engine, issue *Issue, notificationAuthor | |||
| } | |||
| } | |||
| issue.loadRepo(e) | |||
| err = issue.loadRepo(e) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for _, watch := range watches { | |||
| issue.Repo.Units = nil | |||
| @@ -106,7 +106,10 @@ func InitOAuth2() error { | |||
| for _, source := range loginSources { | |||
| oAuth2Config := source.OAuth2() | |||
| oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||
| err := oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -142,6 +142,9 @@ func GetOAuth2ApplicationByID(id int64) (app *OAuth2Application, err error) { | |||
| func getOAuth2ApplicationByID(e Engine, id int64) (app *OAuth2Application, err error) { | |||
| app = new(OAuth2Application) | |||
| has, err := e.ID(id).Get(app) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if !has { | |||
| return nil, ErrOAuthApplicationNotFound{ID: id} | |||
| } | |||
| @@ -295,10 +298,10 @@ func (code *OAuth2AuthorizationCode) invalidate(e Engine) error { | |||
| // ValidateCodeChallenge validates the given verifier against the saved code challenge. This is part of the PKCE implementation. | |||
| func (code *OAuth2AuthorizationCode) ValidateCodeChallenge(verifier string) bool { | |||
| return code.validateCodeChallenge(x, verifier) | |||
| return code.validateCodeChallenge(verifier) | |||
| } | |||
| func (code *OAuth2AuthorizationCode) validateCodeChallenge(e Engine, verifier string) bool { | |||
| func (code *OAuth2AuthorizationCode) validateCodeChallenge(verifier string) bool { | |||
| switch code.CodeChallengeMethod { | |||
| case "S256": | |||
| // base64url(SHA256(verifier)) see https://tools.ietf.org/html/rfc7636#section-4.6 | |||
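For context on the `S256` branch above: RFC 7636 defines the check as `code_challenge == base64url-without-padding(SHA256(code_verifier))`. A minimal standalone sketch using the verifier/challenge pair from the RFC's appendix (the variable names here are illustrative, not the model's fields):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Example pair from RFC 7636, Appendix B.
	verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
	challenge := "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"

	sum := sha256.Sum256([]byte(verifier))
	computed := base64.RawURLEncoding.EncodeToString(sum[:])
	fmt.Println(computed == challenge) // true
}
```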
| @@ -172,7 +172,9 @@ func CreateOrganization(org, owner *User) (err error) { | |||
| } | |||
| if _, err = sess.Insert(&units); err != nil { | |||
| sess.Rollback() | |||
| if err := sess.Rollback(); err != nil { | |||
| log.Error("CreateOrganization: sess.Rollback: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| @@ -376,10 +378,7 @@ func HasOrgVisible(org *User, user *User) bool { | |||
| func hasOrgVisible(e Engine, org *User, user *User) bool { | |||
| // Not SignedUser | |||
| if user == nil { | |||
| if org.Visibility == structs.VisibleTypePublic { | |||
| return true | |||
| } | |||
| return false | |||
| return org.Visibility == structs.VisibleTypePublic | |||
| } | |||
| if user.IsAdmin { | |||
| @@ -485,10 +484,14 @@ func AddOrgUser(orgID, uid int64) error { | |||
| } | |||
| if _, err := sess.Insert(ou); err != nil { | |||
| sess.Rollback() | |||
| if err := sess.Rollback(); err != nil { | |||
| log.Error("AddOrgUser: sess.Rollback: %v", err) | |||
| } | |||
| return err | |||
| } else if _, err = sess.Exec("UPDATE `user` SET num_members = num_members + 1 WHERE id = ?", orgID); err != nil { | |||
| sess.Rollback() | |||
| if err := sess.Rollback(); err != nil { | |||
| log.Error("AddOrgUser: sess.Rollback: %v", err) | |||
| } | |||
| return err | |||
| } | |||
| @@ -287,7 +287,8 @@ func NewTeam(t *Team) (err error) { | |||
| has, err := x.ID(t.OrgID).Get(new(User)) | |||
| if err != nil { | |||
| return err | |||
| } else if !has { | |||
| } | |||
| if !has { | |||
| return ErrOrgNotExist{t.OrgID, ""} | |||
| } | |||
| @@ -298,7 +299,8 @@ func NewTeam(t *Team) (err error) { | |||
| Get(new(Team)) | |||
| if err != nil { | |||
| return err | |||
| } else if has { | |||
| } | |||
| if has { | |||
| return ErrTeamAlreadyExist{t.OrgID, t.LowerName} | |||
| } | |||
| @@ -309,7 +311,10 @@ func NewTeam(t *Team) (err error) { | |||
| } | |||
| if _, err = sess.Insert(t); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("NewTeam sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| @@ -319,14 +324,20 @@ func NewTeam(t *Team) (err error) { | |||
| unit.TeamID = t.ID | |||
| } | |||
| if _, err = sess.Insert(&t.Units); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("NewTeam sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| } | |||
| // Update organization number of teams. | |||
| if _, err = sess.Exec("UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("NewTeam sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| return sess.Commit() | |||
| @@ -412,7 +423,10 @@ func UpdateTeam(t *Team, authChanged bool) (err error) { | |||
| } | |||
| if _, err = sess.Insert(&t.Units); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("UpdateTeam sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| } | |||
| @@ -841,7 +855,10 @@ func UpdateTeamUnits(team *Team, units []TeamUnit) (err error) { | |||
| } | |||
| if _, err = sess.Insert(units); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("UpdateTeamUnits sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| @@ -242,10 +242,10 @@ func TestGetOrgByName(t *testing.T) { | |||
| assert.EqualValues(t, 3, org.ID) | |||
| assert.Equal(t, "user3", org.Name) | |||
| org, err = GetOrgByName("user2") // user2 is an individual | |||
| _, err = GetOrgByName("user2") // user2 is an individual | |||
| assert.True(t, IsErrOrgNotExist(err)) | |||
| org, err = GetOrgByName("") // corner case | |||
| _, err = GetOrgByName("") // corner case | |||
| assert.True(t, IsErrOrgNotExist(err)) | |||
| } | |||
| @@ -499,7 +499,7 @@ func TestAccessibleReposEnv_CountRepos(t *testing.T) { | |||
| func TestAccessibleReposEnv_RepoIDs(t *testing.T) { | |||
| assert.NoError(t, PrepareTestDatabase()) | |||
| org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User) | |||
| testSuccess := func(userID, page, pageSize int64, expectedRepoIDs []int64) { | |||
| testSuccess := func(userID, _, pageSize int64, expectedRepoIDs []int64) { | |||
| env, err := org.AccessibleReposEnv(userID) | |||
| assert.NoError(t, err) | |||
| repoIDs, err := env.RepoIDs(1, 100) | |||
| @@ -192,15 +192,19 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest { | |||
| } | |||
| } | |||
| if baseBranch, err = pr.BaseRepo.GetBranch(pr.BaseBranch); err != nil { | |||
| log.Error("pr.BaseRepo.GetBranch[%d]: %v", pr.BaseBranch, err) | |||
| return nil | |||
| } | |||
| if baseCommit, err = baseBranch.GetCommit(); err != nil { | |||
| log.Error("baseBranch.GetCommit[%d]: %v", pr.ID, err) | |||
| return nil | |||
| } | |||
| if headBranch, err = pr.HeadRepo.GetBranch(pr.HeadBranch); err != nil { | |||
| log.Error("pr.HeadRepo.GetBranch[%d]: %v", pr.HeadBranch, err) | |||
| return nil | |||
| } | |||
| if headCommit, err = headBranch.GetCommit(); err != nil { | |||
| log.Error("headBranch.GetCommit[%d]: %v", pr.ID, err) | |||
| return nil | |||
| } | |||
| apiBaseBranchInfo := &api.PRBranchInfo{ | |||
| @@ -218,7 +222,10 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest { | |||
| Repository: pr.HeadRepo.innerAPIFormat(e, AccessModeNone, false), | |||
| } | |||
| pr.Issue.loadRepo(e) | |||
| if err = pr.Issue.loadRepo(e); err != nil { | |||
| log.Error("pr.Issue.loadRepo[%d]: %v", pr.ID, err) | |||
| return nil | |||
| } | |||
| apiPullRequest := &api.PullRequest{ | |||
| ID: pr.ID, | |||
| @@ -420,7 +427,11 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer RemoveTemporaryPath(tmpBasePath) | |||
| defer func() { | |||
| if err := RemoveTemporaryPath(tmpBasePath); err != nil { | |||
| log.Error("Merge: RemoveTemporaryPath: %s", err) | |||
| } | |||
| }() | |||
| headRepoPath := RepoPath(pr.HeadUserName, pr.HeadRepo.Name) | |||
| @@ -1142,7 +1153,9 @@ func (pr *PullRequest) UpdatePatch() (err error) { | |||
| return fmt.Errorf("AddRemote: %v", err) | |||
| } | |||
| defer func() { | |||
| headGitRepo.RemoveRemote(tmpRemote) | |||
| if err := headGitRepo.RemoveRemote(tmpRemote); err != nil { | |||
| log.Error("UpdatePatch: RemoveRemote: %s", err) | |||
| } | |||
| }() | |||
| pr.MergeBase, _, err = headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch) | |||
| if err != nil { | |||
| @@ -1180,7 +1193,11 @@ func (pr *PullRequest) PushToBaseRepo() (err error) { | |||
| return fmt.Errorf("headGitRepo.AddRemote: %v", err) | |||
| } | |||
| // Make sure to remove the remote even if the push fails | |||
| defer headGitRepo.RemoveRemote(tmpRemoteName) | |||
| defer func() { | |||
| if err := headGitRepo.RemoveRemote(tmpRemoteName); err != nil { | |||
| log.Error("PushToBaseRepo: RemoveRemote: %s", err) | |||
| } | |||
| }() | |||
| headFile := pr.GetGitRefName() | |||
| @@ -94,7 +94,7 @@ func TestGetUnmergedPullRequest(t *testing.T) { | |||
| assert.NoError(t, err) | |||
| assert.Equal(t, int64(2), pr.ID) | |||
| pr, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master") | |||
| _, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master") | |||
| assert.Error(t, err) | |||
| assert.True(t, IsErrPullRequestNotExist(err)) | |||
| } | |||
| @@ -128,7 +128,7 @@ func TestGetPullRequestByIndex(t *testing.T) { | |||
| assert.Equal(t, int64(1), pr.BaseRepoID) | |||
| assert.Equal(t, int64(2), pr.Index) | |||
| pr, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807) | |||
| _, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807) | |||
| assert.Error(t, err) | |||
| assert.True(t, IsErrPullRequestNotExist(err)) | |||
| } | |||
| @@ -151,7 +151,7 @@ func TestGetPullRequestByIssueID(t *testing.T) { | |||
| assert.NoError(t, err) | |||
| assert.Equal(t, int64(2), pr.IssueID) | |||
| pr, err = GetPullRequestByIssueID(9223372036854775807) | |||
| _, err = GetPullRequestByIssueID(9223372036854775807) | |||
| assert.Error(t, err) | |||
| assert.True(t, IsErrPullRequestNotExist(err)) | |||
| } | |||
| @@ -50,12 +50,12 @@ func (r *Release) loadAttributes(e Engine) error { | |||
| } | |||
| } | |||
| if r.Publisher == nil { | |||
| r.Publisher, err = GetUserByID(r.PublisherID) | |||
| r.Publisher, err = getUserByID(e, r.PublisherID) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return GetReleaseAttachments(r) | |||
| return getReleaseAttachments(e, r) | |||
| } | |||
| // LoadAttributes load repo and publisher attributes for a release | |||
| @@ -316,6 +316,10 @@ func (s releaseMetaSearch) Less(i, j int) bool { | |||
| // GetReleaseAttachments retrieves the attachments for releases | |||
| func GetReleaseAttachments(rels ...*Release) (err error) { | |||
| return getReleaseAttachments(x, rels...) | |||
| } | |||
| func getReleaseAttachments(e Engine, rels ...*Release) (err error) { | |||
| if len(rels) == 0 { | |||
| return | |||
| } | |||
| @@ -335,11 +339,10 @@ func GetReleaseAttachments(rels ...*Release) (err error) { | |||
| sort.Sort(sortedRels) | |||
| // Select attachments | |||
| err = x. | |||
| err = e. | |||
| Asc("release_id"). | |||
| In("release_id", sortedRels.ID). | |||
| Find(&attachments, Attachment{}) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| @@ -354,7 +357,6 @@ func GetReleaseAttachments(rels ...*Release) (err error) { | |||
| } | |||
| return | |||
| } | |||
| type releaseSorter struct { | |||
| @@ -493,7 +495,7 @@ func SyncReleasesWithTags(repo *Repository, gitRepo *git.Repository) error { | |||
| return fmt.Errorf("GetTagCommitID: %v", err) | |||
| } | |||
| if git.IsErrNotExist(err) || commitID != rel.Sha1 { | |||
| if err := pushUpdateDeleteTag(repo, gitRepo, rel.TagName); err != nil { | |||
| if err := pushUpdateDeleteTag(repo, rel.TagName); err != nil { | |||
| return fmt.Errorf("pushUpdateDeleteTag: %v", err) | |||
| } | |||
| } else { | |||
| @@ -20,7 +20,6 @@ import ( | |||
| "os" | |||
| "path" | |||
| "path/filepath" | |||
| "regexp" | |||
| "sort" | |||
| "strconv" | |||
| "strings" | |||
| @@ -744,10 +743,6 @@ func (repo *Repository) getUsersWithAccessMode(e Engine, mode AccessMode) (_ []* | |||
| return users, nil | |||
| } | |||
| var ( | |||
| descPattern = regexp.MustCompile(`https?://\S+`) | |||
| ) | |||
| // DescriptionHTML does special handling of the description and returns an HTML string. | |||
| func (repo *Repository) DescriptionHTML() template.HTML { | |||
| desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas()) | |||
| @@ -1333,11 +1328,9 @@ func createRepository(e *xorm.Session, doer, u *User, repo *Repository) (err err | |||
| return fmt.Errorf("prepareWebhooks: %v", err) | |||
| } | |||
| go HookQueue.Add(repo.ID) | |||
| } else { | |||
| } else if err = repo.recalculateAccesses(e); err != nil { | |||
| // Organization automatically called this in addRepository method. | |||
| if err = repo.recalculateAccesses(e); err != nil { | |||
| return fmt.Errorf("recalculateAccesses: %v", err) | |||
| } | |||
| return fmt.Errorf("recalculateAccesses: %v", err) | |||
| } | |||
| if setting.Service.AutoWatchNewRepos { | |||
| @@ -1512,11 +1505,9 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) error | |||
| } else if err = t.addRepository(sess, repo); err != nil { | |||
| return fmt.Errorf("add to owner team: %v", err) | |||
| } | |||
| } else { | |||
| } else if err = repo.recalculateAccesses(sess); err != nil { | |||
| // Organization called this in addRepository method. | |||
| if err = repo.recalculateAccesses(sess); err != nil { | |||
| return fmt.Errorf("recalculateAccesses: %v", err) | |||
| } | |||
| return fmt.Errorf("recalculateAccesses: %v", err) | |||
| } | |||
| // Update repository count. | |||
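Editor's note: the createRepository and TransferOwnership hunks are the same mechanical rewrite — an else block whose only statement is another if gets collapsed into else-if, which checks such as gocritic's elseif suggest. Behaviour is unchanged. A small sketch under those assumptions (recalc and setup are hypothetical names):

    package main

    import (
    	"errors"
    	"fmt"
    )

    func recalc(ok bool) error {
    	if !ok {
    		return errors.New("recalculateAccesses failed")
    	}
    	return nil
    }

    func setup(isOrg, ok bool) error {
    	if isOrg {
    		fmt.Println("organization path")
    	} else if err := recalc(ok); err != nil {
    		// Before: else { if err := recalc(ok); err != nil { ... } }
    		// Collapsing the nested if into `else if` removes one indent level
    		// without changing control flow.
    		return fmt.Errorf("recalculateAccesses: %v", err)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(setup(false, false))
    }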
| @@ -1864,7 +1855,10 @@ func DeleteRepository(doer *User, uid, repoID int64) error { | |||
| repoPath := repo.repoPath(sess) | |||
| removeAllWithNotice(sess, "Delete repository files", repoPath) | |||
| repo.deleteWiki(sess) | |||
| err = repo.deleteWiki(sess) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Remove attachment files. | |||
| for i := range attachmentPaths { | |||
| @@ -2522,7 +2516,7 @@ func (repo *Repository) GetUserFork(userID int64) (*Repository, error) { | |||
| // CustomAvatarPath returns repository custom avatar file path. | |||
| func (repo *Repository) CustomAvatarPath() string { | |||
| // Avatar empty by default | |||
| if len(repo.Avatar) <= 0 { | |||
| if len(repo.Avatar) == 0 { | |||
| return "" | |||
| } | |||
| return filepath.Join(setting.RepositoryAvatarUploadPath, repo.Avatar) | |||
| @@ -2562,10 +2556,7 @@ func (repo *Repository) generateRandomAvatar(e Engine) error { | |||
| // RemoveRandomAvatars removes the randomly generated avatars that were created for repositories | |||
| func RemoveRandomAvatars() error { | |||
| var ( | |||
| err error | |||
| ) | |||
| err = x. | |||
| return x. | |||
| Where("id > 0").BufferSize(setting.IterateBufferSize). | |||
| Iterate(new(Repository), | |||
| func(idx int, bean interface{}) error { | |||
| @@ -2576,7 +2567,6 @@ func RemoveRandomAvatars() error { | |||
| } | |||
| return nil | |||
| }) | |||
| return err | |||
| } | |||
| // RelAvatarLink returns a relative link to the repository's avatar. | |||
| @@ -2587,7 +2577,7 @@ func (repo *Repository) RelAvatarLink() string { | |||
| func (repo *Repository) relAvatarLink(e Engine) string { | |||
| // If no avatar - path is empty | |||
| avatarPath := repo.CustomAvatarPath() | |||
| if len(avatarPath) <= 0 || !com.IsFile(avatarPath) { | |||
| if len(avatarPath) == 0 || !com.IsFile(avatarPath) { | |||
| switch mode := setting.RepositoryAvatarFallback; mode { | |||
| case "image": | |||
| return setting.RepositoryAvatarFallbackImage | |||
| @@ -114,7 +114,7 @@ func GetActivityStatsTopAuthors(repo *Repository, timeFrom time.Time, count int) | |||
| v = append(v, u) | |||
| } | |||
| sort.Slice(v[:], func(i, j int) bool { | |||
| sort.Slice(v, func(i, j int) bool { | |||
| return v[i].Commits < v[j].Commits | |||
| }) | |||
| @@ -75,7 +75,11 @@ func (repo *Repository) CreateNewBranch(doer *User, oldBranchName, branchName st | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer RemoveTemporaryPath(basePath) | |||
| defer func() { | |||
| if err := RemoveTemporaryPath(basePath); err != nil { | |||
| log.Error("CreateNewBranch: RemoveTemporaryPath: %s", err) | |||
| } | |||
| }() | |||
| if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | |||
| Bare: true, | |||
| @@ -117,7 +121,11 @@ func (repo *Repository) CreateNewBranchFromCommit(doer *User, commit, branchName | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer RemoveTemporaryPath(basePath) | |||
| defer func() { | |||
| if err := RemoveTemporaryPath(basePath); err != nil { | |||
| log.Error("CreateNewBranchFromCommit: RemoveTemporaryPath: %s", err) | |||
| } | |||
| }() | |||
| if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | |||
| Bare: true, | |||
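Editor's note: errcheck flags `defer RemoveTemporaryPath(basePath)` because the returned error is silently dropped. Wrapping the call in a deferred closure keeps the cleanup deferred while at least logging the failure. A self-contained sketch of the same shape using only the standard library (temp-dir cleanup instead of Gitea's RemoveTemporaryPath):

    package main

    import (
    	"io/ioutil"
    	"log"
    	"os"
    )

    func withTempDir() error {
    	basePath, err := ioutil.TempDir("", "example")
    	if err != nil {
    		return err
    	}
    	// Before: `defer os.RemoveAll(basePath)` drops the error.
    	// The closure keeps the deferred semantics and logs failures instead.
    	defer func() {
    		if err := os.RemoveAll(basePath); err != nil {
    			log.Printf("withTempDir: cleanup of %s failed: %v", basePath, err)
    		}
    	}()

    	// ... work with basePath ...
    	return nil
    }

    func main() {
    	if err := withTempDir(); err != nil {
    		log.Fatal(err)
    	}
    }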
| @@ -142,7 +142,7 @@ func (repo *Repository) ChangeCollaborationAccessMode(uid int64, mode AccessMode | |||
| } | |||
| if _, err = sess. | |||
| Id(collaboration.ID). | |||
| ID(collaboration.ID). | |||
| Cols("mode"). | |||
| Update(collaboration); err != nil { | |||
| return fmt.Errorf("update collaboration: %v", err) | |||
| @@ -148,19 +148,19 @@ func (s SearchOrderBy) String() string { | |||
| // Strings for sorting result | |||
| const ( | |||
| SearchOrderByAlphabetically SearchOrderBy = "name ASC" | |||
| SearchOrderByAlphabeticallyReverse = "name DESC" | |||
| SearchOrderByLeastUpdated = "updated_unix ASC" | |||
| SearchOrderByRecentUpdated = "updated_unix DESC" | |||
| SearchOrderByOldest = "created_unix ASC" | |||
| SearchOrderByNewest = "created_unix DESC" | |||
| SearchOrderBySize = "size ASC" | |||
| SearchOrderBySizeReverse = "size DESC" | |||
| SearchOrderByID = "id ASC" | |||
| SearchOrderByIDReverse = "id DESC" | |||
| SearchOrderByStars = "num_stars ASC" | |||
| SearchOrderByStarsReverse = "num_stars DESC" | |||
| SearchOrderByForks = "num_forks ASC" | |||
| SearchOrderByForksReverse = "num_forks DESC" | |||
| SearchOrderByAlphabeticallyReverse SearchOrderBy = "name DESC" | |||
| SearchOrderByLeastUpdated SearchOrderBy = "updated_unix ASC" | |||
| SearchOrderByRecentUpdated SearchOrderBy = "updated_unix DESC" | |||
| SearchOrderByOldest SearchOrderBy = "created_unix ASC" | |||
| SearchOrderByNewest SearchOrderBy = "created_unix DESC" | |||
| SearchOrderBySize SearchOrderBy = "size ASC" | |||
| SearchOrderBySizeReverse SearchOrderBy = "size DESC" | |||
| SearchOrderByID SearchOrderBy = "id ASC" | |||
| SearchOrderByIDReverse SearchOrderBy = "id DESC" | |||
| SearchOrderByStars SearchOrderBy = "num_stars ASC" | |||
| SearchOrderByStarsReverse SearchOrderBy = "num_stars DESC" | |||
| SearchOrderByForks SearchOrderBy = "num_forks ASC" | |||
| SearchOrderByForksReverse SearchOrderBy = "num_forks DESC" | |||
| ) | |||
| // SearchRepositoryByName takes keyword and part of repository name to search, | |||
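Editor's note: in a Go const block where every constant has its own initializer, only the entries with an explicit type actually carry that type; the rest are plain untyped string constants. The hunk above annotates every SearchOrderBy constant so they all have the declared type. A minimal sketch of the difference (names here are illustrative):

    package main

    import "fmt"

    type SearchOrderBy string

    const (
    	// Previously only the first constant was typed; the later ones were
    	// untyped string constants, so their declared type was not SearchOrderBy.
    	OrderByNameAsc  SearchOrderBy = "name ASC"
    	OrderByNameDesc SearchOrderBy = "name DESC" // explicit type on every line
    )

    func describe(o SearchOrderBy) string { return string(o) }

    func main() {
    	fmt.Println(describe(OrderByNameAsc), describe(OrderByNameDesc))
    	fmt.Printf("%T\n", OrderByNameDesc) // main.SearchOrderBy, not string
    }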
| @@ -4,7 +4,10 @@ | |||
| package models | |||
| import "strings" | |||
| import ( | |||
| "code.gitea.io/gitea/modules/log" | |||
| "strings" | |||
| ) | |||
| // RepoRedirect represents that a repo name should be redirected to another | |||
| type RepoRedirect struct { | |||
| @@ -38,7 +41,10 @@ func NewRepoRedirect(ownerID, repoID int64, oldRepoName, newRepoName string) err | |||
| } | |||
| if err := deleteRepoRedirect(sess, ownerID, newRepoName); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("NewRepoRedirect sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| @@ -47,7 +53,10 @@ func NewRepoRedirect(ownerID, repoID int64, oldRepoName, newRepoName string) err | |||
| LowerName: oldRepoName, | |||
| RedirectRepoID: repoID, | |||
| }); err != nil { | |||
| sess.Rollback() | |||
| errRollback := sess.Rollback() | |||
| if errRollback != nil { | |||
| log.Error("NewRepoRedirect sess.Rollback: %v", errRollback) | |||
| } | |||
| return err | |||
| } | |||
| return sess.Commit() | |||
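Editor's note: errcheck also flags `sess.Rollback()`. Since the original failure is the error worth returning, the rollback error is only logged so it cannot mask the real cause. A compact sketch with a stand-in session type (not xorm's):

    package main

    import (
    	"errors"
    	"fmt"
    	"log"
    )

    // session stands in for *xorm.Session: Rollback itself can fail.
    type session struct{ broken bool }

    func (s *session) Rollback() error {
    	if s.broken {
    		return errors.New("rollback failed")
    	}
    	return nil
    }

    func insert() error { return errors.New("insert failed") }

    func doWork(s *session) error {
    	if err := insert(); err != nil {
    		// Log the rollback error, return the insert error.
    		if errRollback := s.Rollback(); errRollback != nil {
    			log.Printf("doWork: sess.Rollback: %v", errRollback)
    		}
    		return err
    	}
    	return nil
    }

    func main() {
    	fmt.Println(doWork(&session{broken: true}))
    }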
| @@ -142,7 +142,7 @@ func parseKeyString(content string) (string, error) { | |||
| if continuationLine || strings.ContainsAny(line, ":-") { | |||
| continuationLine = strings.HasSuffix(line, "\\") | |||
| } else { | |||
| keyContent = keyContent + line | |||
| keyContent += line | |||
| } | |||
| } | |||
| @@ -392,7 +392,7 @@ func addKey(e Engine, key *PublicKey) (err error) { | |||
| } | |||
| // AddPublicKey adds new public key to database and authorized_keys file. | |||
| func AddPublicKey(ownerID int64, name, content string, LoginSourceID int64) (*PublicKey, error) { | |||
| func AddPublicKey(ownerID int64, name, content string, loginSourceID int64) (*PublicKey, error) { | |||
| log.Trace(content) | |||
| fingerprint, err := calcFingerprint(content) | |||
| @@ -427,7 +427,7 @@ func AddPublicKey(ownerID int64, name, content string, LoginSourceID int64) (*Pu | |||
| Content: content, | |||
| Mode: AccessModeWrite, | |||
| Type: KeyTypeUser, | |||
| LoginSourceID: LoginSourceID, | |||
| LoginSourceID: loginSourceID, | |||
| } | |||
| if err = addKey(sess, key); err != nil { | |||
| return nil, fmt.Errorf("addKey: %v", err) | |||
| @@ -491,10 +491,10 @@ func ListPublicKeys(uid int64) ([]*PublicKey, error) { | |||
| } | |||
| // ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source. | |||
| func ListPublicLdapSSHKeys(uid int64, LoginSourceID int64) ([]*PublicKey, error) { | |||
| func ListPublicLdapSSHKeys(uid int64, loginSourceID int64) ([]*PublicKey, error) { | |||
| keys := make([]*PublicKey, 0, 5) | |||
| return keys, x. | |||
| Where("owner_id = ? AND login_source_id = ?", uid, LoginSourceID). | |||
| Where("owner_id = ? AND login_source_id = ?", uid, loginSourceID). | |||
| Find(&keys) | |||
| } | |||
| @@ -87,7 +87,7 @@ func (status *CommitStatus) loadRepo(e Engine) (err error) { | |||
| // APIURL returns the absolute APIURL to this commit-status. | |||
| func (status *CommitStatus) APIURL() string { | |||
| status.loadRepo(x) | |||
| _ = status.loadRepo(x) | |||
| return fmt.Sprintf("%sapi/v1/%s/statuses/%s", | |||
| setting.AppURL, status.Repo.FullName(), status.SHA) | |||
| } | |||
| @@ -95,7 +95,7 @@ func (status *CommitStatus) APIURL() string { | |||
| // APIFormat assumes some fields assigned with values: | |||
| // Required - Repo, Creator | |||
| func (status *CommitStatus) APIFormat() *api.Status { | |||
| status.loadRepo(x) | |||
| _ = status.loadRepo(x) | |||
| apiStatus := &api.Status{ | |||
| Created: status.CreatedUnix.AsTime(), | |||
| Updated: status.CreatedUnix.AsTime(), | |||
| @@ -219,7 +219,9 @@ func newCommitStatus(sess *xorm.Session, opts NewCommitStatusOptions) error { | |||
| } | |||
| has, err := sess.Desc("index").Limit(1).Get(lastCommitStatus) | |||
| if err != nil { | |||
| sess.Rollback() | |||
| if err := sess.Rollback(); err != nil { | |||
| log.Error("newCommitStatus: sess.Rollback: %v", err) | |||
| } | |||
| return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | |||
| } | |||
| if has { | |||
| @@ -231,7 +233,9 @@ func newCommitStatus(sess *xorm.Session, opts NewCommitStatusOptions) error { | |||
| // Insert new CommitStatus | |||
| if _, err = sess.Insert(opts.CommitStatus); err != nil { | |||
| sess.Rollback() | |||
| if err := sess.Rollback(); err != nil { | |||
| log.Error("newCommitStatus: sess.Rollback: %v", err) | |||
| } | |||
| return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | |||
| } | |||
| @@ -36,11 +36,11 @@ func TestGetAccessTokenBySHA(t *testing.T) { | |||
| assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash) | |||
| assert.Equal(t, "e4efbf36", token.TokenLastEight) | |||
| token, err = GetAccessTokenBySHA("notahash") | |||
| _, err = GetAccessTokenBySHA("notahash") | |||
| assert.Error(t, err) | |||
| assert.True(t, IsErrAccessTokenNotExist(err)) | |||
| token, err = GetAccessTokenBySHA("") | |||
| _, err = GetAccessTokenBySHA("") | |||
| assert.Error(t, err) | |||
| assert.True(t, IsErrAccessTokenEmpty(err)) | |||
| } | |||
| @@ -84,7 +84,7 @@ func PushUpdate(branch string, opt PushUpdateOptions) error { | |||
| return nil | |||
| } | |||
| func pushUpdateDeleteTag(repo *Repository, gitRepo *git.Repository, tagName string) error { | |||
| func pushUpdateDeleteTag(repo *Repository, tagName string) error { | |||
| rel, err := GetRelease(repo.ID, tagName) | |||
| if err != nil { | |||
| if IsErrReleaseNotExist(err) { | |||
| @@ -223,7 +223,7 @@ func pushUpdate(opts PushUpdateOptions) (repo *Repository, err error) { | |||
| // If is tag reference | |||
| tagName := opts.RefFullName[len(git.TagPrefix):] | |||
| if isDelRef { | |||
| err = pushUpdateDeleteTag(repo, gitRepo, tagName) | |||
| err = pushUpdateDeleteTag(repo, tagName) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("pushUpdateDeleteTag: %v", err) | |||
| } | |||
| @@ -1072,7 +1072,10 @@ func deleteUser(e *xorm.Session, u *User) error { | |||
| if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil { | |||
| return fmt.Errorf("deletePublicKeys: %v", err) | |||
| } | |||
| rewriteAllPublicKeys(e) | |||
| err = rewriteAllPublicKeys(e) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // ***** END: PublicKey ***** | |||
| // ***** START: GPGPublicKey ***** | |||
| @@ -1401,8 +1404,7 @@ func (opts *SearchUserOptions) toConds() builder.Cond { | |||
| } else { | |||
| exprCond = builder.Expr("org_user.org_id = \"user\".id") | |||
| } | |||
| var accessCond = builder.NewCond() | |||
| accessCond = builder.Or( | |||
| accessCond := builder.Or( | |||
| builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.OwnerID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))), | |||
| builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited)) | |||
| cond = cond.And(accessCond) | |||
| @@ -1512,9 +1514,9 @@ func deleteKeysMarkedForDeletion(keys []string) (bool, error) { | |||
| } | |||
| // addLdapSSHPublicKeys add a users public keys. Returns true if there are changes. | |||
| func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool { | |||
| func addLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool { | |||
| var sshKeysNeedUpdate bool | |||
| for _, sshKey := range SSHPublicKeys { | |||
| for _, sshKey := range sshPublicKeys { | |||
| _, _, _, _, err := ssh.ParseAuthorizedKey([]byte(sshKey)) | |||
| if err == nil { | |||
| sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40]) | |||
| @@ -1536,7 +1538,7 @@ func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) boo | |||
| } | |||
| // synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes. | |||
| func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool { | |||
| func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool { | |||
| var sshKeysNeedUpdate bool | |||
| log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name) | |||
| @@ -1554,7 +1556,7 @@ func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []str | |||
| // Get Public Keys from LDAP and skip duplicate keys | |||
| var ldapKeys []string | |||
| for _, v := range SSHPublicKeys { | |||
| for _, v := range sshPublicKeys { | |||
| sshKeySplit := strings.Split(v, " ") | |||
| if len(sshKeySplit) > 1 { | |||
| ldapKey := strings.Join(sshKeySplit[:2], " ") | |||
| @@ -1634,9 +1636,13 @@ func SyncExternalUsers() { | |||
| // Find all users with this login type | |||
| var users []*User | |||
| x.Where("login_type = ?", LoginLDAP). | |||
| err = x.Where("login_type = ?", LoginLDAP). | |||
| And("login_source = ?", s.ID). | |||
| Find(&users) | |||
| if err != nil { | |||
| log.Error("SyncExternalUsers: %v", err) | |||
| return | |||
| } | |||
| sr := s.LDAP().SearchEntries() | |||
| for _, su := range sr { | |||
| @@ -1694,7 +1700,7 @@ func SyncExternalUsers() { | |||
| // Check if user data has changed | |||
| if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) || | |||
| strings.ToLower(usr.Email) != strings.ToLower(su.Mail) || | |||
| !strings.EqualFold(usr.Email, su.Mail) || | |||
| usr.FullName != fullName || | |||
| !usr.IsActive { | |||
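Editor's note: comparing two strings.ToLower results allocates two new strings; linters such as staticcheck and gocritic suggest strings.EqualFold, which compares under Unicode case folding without allocating. A tiny illustration:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	a, b := "User@Example.COM", "user@example.com"

    	// Before: strings.ToLower(a) != strings.ToLower(b) builds two new strings.
    	// EqualFold compares rune by rune under Unicode case folding.
    	fmt.Println(strings.EqualFold(a, b))  // true
    	fmt.Println(!strings.EqualFold(a, b)) // the negated form used in the diff
    }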
| @@ -1718,7 +1724,10 @@ func SyncExternalUsers() { | |||
| // Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed | |||
| if sshKeysNeedUpdate { | |||
| RewriteAllPublicKeys() | |||
| err = RewriteAllPublicKeys() | |||
| if err != nil { | |||
| log.Error("RewriteAllPublicKeys: %v", err) | |||
| } | |||
| } | |||
| // Deactivate users not present in LDAP | |||
| @@ -134,7 +134,7 @@ func (email *EmailAddress) Activate() error { | |||
| email.IsActivated = true | |||
| if _, err := sess. | |||
| Id(email.ID). | |||
| ID(email.ID). | |||
| Cols("is_activated"). | |||
| Update(email); err != nil { | |||
| return err | |||
| @@ -31,12 +31,12 @@ func TestGetUserOpenIDs(t *testing.T) { | |||
| func TestGetUserByOpenID(t *testing.T) { | |||
| assert.NoError(t, PrepareTestDatabase()) | |||
| user, err := GetUserByOpenID("https://unknown") | |||
| _, err := GetUserByOpenID("https://unknown") | |||
| if assert.Error(t, err) { | |||
| assert.True(t, IsErrUserNotExist(err)) | |||
| } | |||
| user, err = GetUserByOpenID("https://user1.domain1.tld") | |||
| user, err := GetUserByOpenID("https://user1.domain1.tld") | |||
| if assert.NoError(t, err) { | |||
| assert.Equal(t, user.ID, int64(1)) | |||
| } | |||
| @@ -700,7 +700,10 @@ func prepareWebhook(e Engine, w *Webhook, repo *Repository, event HookEventType, | |||
| log.Error("prepareWebhooks.JSONPayload: %v", err) | |||
| } | |||
| sig := hmac.New(sha256.New, []byte(w.Secret)) | |||
| sig.Write(data) | |||
| _, err = sig.Write(data) | |||
| if err != nil { | |||
| log.Error("prepareWebhooks.sigWrite: %v", err) | |||
| } | |||
| signature = hex.EncodeToString(sig.Sum(nil)) | |||
| } | |||
| @@ -930,8 +933,7 @@ func InitDeliverHooks() { | |||
| return nil, err | |||
| } | |||
| conn.SetDeadline(time.Now().Add(timeout)) | |||
| return conn, nil | |||
| return conn, conn.SetDeadline(time.Now().Add(timeout)) | |||
| }, | |||
| }, | |||
| @@ -490,7 +490,7 @@ func getDiscordReleasePayload(p *api.ReleasePayload, meta *DiscordMeta) (*Discor | |||
| Embeds: []DiscordEmbed{ | |||
| { | |||
| Title: title, | |||
| Description: fmt.Sprintf("%s", p.Release.Note), | |||
| Description: p.Release.Note, | |||
| URL: url, | |||
| Color: color, | |||
| Author: DiscordEmbedAuthor{ | |||
| @@ -115,7 +115,11 @@ func (repo *Repository) updateWikiPage(doer *User, oldWikiName, newWikiName, con | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer RemoveTemporaryPath(basePath) | |||
| defer func() { | |||
| if err := RemoveTemporaryPath(basePath); err != nil { | |||
| log.Error("Merge: RemoveTemporaryPath: %s", err) | |||
| } | |||
| }() | |||
| cloneOpts := git.CloneRepoOptions{ | |||
| Bare: true, | |||
| @@ -246,7 +250,11 @@ func (repo *Repository) DeleteWikiPage(doer *User, wikiName string) (err error) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer RemoveTemporaryPath(basePath) | |||
| defer func() { | |||
| if err := RemoveTemporaryPath(basePath); err != nil { | |||
| log.Error("Merge: RemoveTemporaryPath: %s", err) | |||
| } | |||
| }() | |||
| if err := git.Clone(repo.WikiPath(), basePath, git.CloneRepoOptions{ | |||
| Bare: true, | |||
| @@ -214,10 +214,8 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool) | |||
| if err = models.UpdateAccessToken(token); err != nil { | |||
| log.Error("UpdateAccessToken: %v", err) | |||
| } | |||
| } else { | |||
| if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { | |||
| log.Error("GetAccessTokenBySha: %v", err) | |||
| } | |||
| } else if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { | |||
| log.Error("GetAccessTokenBySha: %v", err) | |||
| } | |||
| if u == nil { | |||
| @@ -301,12 +299,6 @@ func GetInclude(field reflect.StructField) string { | |||
| return getRuleBody(field, "Include(") | |||
| } | |||
| // FIXME: struct contains a struct | |||
| func validateStruct(obj interface{}) binding.Errors { | |||
| return nil | |||
| } | |||
| func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaron.Locale) binding.Errors { | |||
| if errs.Len() == 0 { | |||
| return errs | |||
| @@ -220,8 +220,7 @@ func GetDefaultProfileURL(provider string) string { | |||
| // GetDefaultEmailURL return the default email url for the given provider | |||
| func GetDefaultEmailURL(provider string) string { | |||
| switch provider { | |||
| case "github": | |||
| if provider == "github" { | |||
| return github.EmailURL | |||
| } | |||
| return "" | |||
| @@ -39,7 +39,7 @@ func TestTimedDiscoveryCache(t *testing.T) { | |||
| t.Errorf("Expected nil, got %v", di) | |||
| } | |||
| // Sleep one second and try retrive again | |||
| // Sleep one second and try retrieve again | |||
| time.Sleep(1 * time.Second) | |||
| if di := dc.Get("foo"); di != nil { | |||
| @@ -253,7 +253,7 @@ func (f UpdateThemeForm) IsThemeExists() bool { | |||
| var exists bool | |||
| for _, v := range setting.UI.Themes { | |||
| if strings.ToLower(v) == strings.ToLower(f.Theme) { | |||
| if strings.EqualFold(v, f.Theme) { | |||
| exists = true | |||
| break | |||
| } | |||
| @@ -44,21 +44,21 @@ var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'} | |||
| // EncodeMD5 encodes string to md5 hex value. | |||
| func EncodeMD5(str string) string { | |||
| m := md5.New() | |||
| m.Write([]byte(str)) | |||
| _, _ = m.Write([]byte(str)) | |||
| return hex.EncodeToString(m.Sum(nil)) | |||
| } | |||
| // EncodeSha1 string to sha1 hex value. | |||
| func EncodeSha1(str string) string { | |||
| h := sha1.New() | |||
| h.Write([]byte(str)) | |||
| _, _ = h.Write([]byte(str)) | |||
| return hex.EncodeToString(h.Sum(nil)) | |||
| } | |||
| // EncodeSha256 string to sha1 hex value. | |||
| func EncodeSha256(str string) string { | |||
| h := sha256.New() | |||
| h.Write([]byte(str)) | |||
| _, _ = h.Write([]byte(str)) | |||
| return hex.EncodeToString(h.Sum(nil)) | |||
| } | |||
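Editor's note: hash.Hash's Write is documented to never return an error, so instead of adding dead error-handling branches the results are discarded explicitly with blank identifiers, which is enough to satisfy errcheck. A self-contained version of the same pattern:

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    )

    // encodeSha256 mirrors the diff: hash.Hash.Write never returns an error,
    // so its results are discarded explicitly to keep errcheck quiet.
    func encodeSha256(s string) string {
    	h := sha256.New()
    	_, _ = h.Write([]byte(s))
    	return hex.EncodeToString(h.Sum(nil))
    }

    func main() {
    	fmt.Println(encodeSha256("gitea"))
    }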
| @@ -193,7 +193,7 @@ func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string | |||
| // create sha1 encode string | |||
| sh := sha1.New() | |||
| sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes))) | |||
| _, _ = sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes))) | |||
| encoded := hex.EncodeToString(sh.Sum(nil)) | |||
| code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded) | |||
| @@ -425,16 +425,6 @@ const ( | |||
| EByte = PByte * 1024 | |||
| ) | |||
| var bytesSizeTable = map[string]uint64{ | |||
| "b": Byte, | |||
| "kb": KByte, | |||
| "mb": MByte, | |||
| "gb": GByte, | |||
| "tb": TByte, | |||
| "pb": PByte, | |||
| "eb": EByte, | |||
| } | |||
| func logn(n, b float64) float64 { | |||
| return math.Log(n) / math.Log(b) | |||
| } | |||
| @@ -582,27 +572,27 @@ func IsTextFile(data []byte) bool { | |||
| if len(data) == 0 { | |||
| return true | |||
| } | |||
| return strings.Index(http.DetectContentType(data), "text/") != -1 | |||
| return strings.Contains(http.DetectContentType(data), "text/") | |||
| } | |||
| // IsImageFile detects if data is an image format | |||
| func IsImageFile(data []byte) bool { | |||
| return strings.Index(http.DetectContentType(data), "image/") != -1 | |||
| return strings.Contains(http.DetectContentType(data), "image/") | |||
| } | |||
| // IsPDFFile detects if data is a pdf format | |||
| func IsPDFFile(data []byte) bool { | |||
| return strings.Index(http.DetectContentType(data), "application/pdf") != -1 | |||
| return strings.Contains(http.DetectContentType(data), "application/pdf") | |||
| } | |||
| // IsVideoFile detects if data is a video format | |||
| func IsVideoFile(data []byte) bool { | |||
| return strings.Index(http.DetectContentType(data), "video/") != -1 | |||
| return strings.Contains(http.DetectContentType(data), "video/") | |||
| } | |||
| // IsAudioFile detects if data is an audio format | |||
| func IsAudioFile(data []byte) bool { | |||
| return strings.Index(http.DetectContentType(data), "audio/") != -1 | |||
| return strings.Contains(http.DetectContentType(data), "audio/") | |||
| } | |||
| // EntryIcon returns the octicon class for displaying files/directories | |||
| @@ -287,20 +287,19 @@ func TestHtmlTimeSince(t *testing.T) { | |||
| } | |||
| func TestFileSize(t *testing.T) { | |||
| var size int64 | |||
| size = 512 | |||
| var size int64 = 512 | |||
| assert.Equal(t, "512B", FileSize(size)) | |||
| size = size * 1024 | |||
| size *= 1024 | |||
| assert.Equal(t, "512KB", FileSize(size)) | |||
| size = size * 1024 | |||
| size *= 1024 | |||
| assert.Equal(t, "512MB", FileSize(size)) | |||
| size = size * 1024 | |||
| size *= 1024 | |||
| assert.Equal(t, "512GB", FileSize(size)) | |||
| size = size * 1024 | |||
| size *= 1024 | |||
| assert.Equal(t, "512TB", FileSize(size)) | |||
| size = size * 1024 | |||
| size *= 1024 | |||
| assert.Equal(t, "512PB", FileSize(size)) | |||
| size = size * 4 | |||
| size *= 4 | |||
| assert.Equal(t, "2.0EB", FileSize(size)) | |||
| } | |||
| @@ -43,7 +43,10 @@ func GetInt(key string, getFunc func() (int, error)) (int, error) { | |||
| if value, err = getFunc(); err != nil { | |||
| return value, err | |||
| } | |||
| conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||
| err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| } | |||
| switch value := conn.Get(key).(type) { | |||
| case int: | |||
| @@ -72,7 +75,10 @@ func GetInt64(key string, getFunc func() (int64, error)) (int64, error) { | |||
| if value, err = getFunc(); err != nil { | |||
| return value, err | |||
| } | |||
| conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||
| err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| } | |||
| switch value := conn.Get(key).(type) { | |||
| case int64: | |||
| @@ -93,5 +99,5 @@ func Remove(key string) { | |||
| if conn == nil { | |||
| return | |||
| } | |||
| conn.Delete(key) | |||
| _ = conn.Delete(key) | |||
| } | |||
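Editor's note: the cache helpers used to treat `conn.Put(...)` as fire-and-forget; the hunk propagates the Put error to the caller instead. A sketch of the same get-or-compute flow with a hypothetical map-backed store (not Gitea's cache interface):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // store stands in for the cache connection used in the diff.
    type store struct {
    	data map[string]int
    	fail bool
    }

    func (s *store) Put(key string, value int) error {
    	if s.fail {
    		return errors.New("cache backend unavailable")
    	}
    	s.data[key] = value
    	return nil
    }

    // getInt returns a cached value, computing and storing it on a miss.
    // The Put error is now returned instead of being silently dropped.
    func getInt(s *store, key string, compute func() (int, error)) (int, error) {
    	if v, ok := s.data[key]; ok {
    		return v, nil
    	}
    	v, err := compute()
    	if err != nil {
    		return 0, err
    	}
    	if err := s.Put(key, v); err != nil {
    		return 0, err
    	}
    	return v, nil
    }

    func main() {
    	s := &store{data: map[string]int{}}
    	v, err := getInt(s, "answer", func() (int, error) { return 42, nil })
    	fmt.Println(v, err)
    }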
| @@ -130,7 +130,6 @@ func (ctx *Context) RedirectToFirst(location ...string) { | |||
| } | |||
| ctx.Redirect(setting.AppSubURL + "/") | |||
| return | |||
| } | |||
| // HTML calls Context.HTML and converts template name to string. | |||
| @@ -266,7 +265,7 @@ func Contexter() macaron.Handler { | |||
| } | |||
| c.Header().Set("Content-Type", "text/html") | |||
| c.WriteHeader(http.StatusOK) | |||
| c.Write([]byte(com.Expand(`<!doctype html> | |||
| _, _ = c.Write([]byte(com.Expand(`<!doctype html> | |||
| <html> | |||
| <head> | |||
| <meta name="go-import" content="{GoGetImport} git {CloneLink}"> | |||
| @@ -39,7 +39,7 @@ func (p *Pagination) AddParam(ctx *Context, paramKey string, ctxKey string) { | |||
| // GetParams returns the configured URL params | |||
| func (p *Pagination) GetParams() template.URL { | |||
| return template.URL(strings.Join(p.urlParams[:], "&")) | |||
| return template.URL(strings.Join(p.urlParams, "&")) | |||
| } | |||
| // SetDefaultParams sets common pagination params that are often used | |||
| @@ -455,15 +455,13 @@ func RepoAssignment() macaron.Handler { | |||
| ctx.Repo.PullRequest.BaseRepo = repo.BaseRepo | |||
| ctx.Repo.PullRequest.Allowed = true | |||
| ctx.Repo.PullRequest.HeadInfo = ctx.Repo.Owner.Name + ":" + ctx.Repo.BranchName | |||
| } else { | |||
| } else if repo.AllowsPulls() { | |||
| // Or, this is repository accepts pull requests between branches. | |||
| if repo.AllowsPulls() { | |||
| ctx.Data["BaseRepo"] = repo | |||
| ctx.Repo.PullRequest.BaseRepo = repo | |||
| ctx.Repo.PullRequest.Allowed = true | |||
| ctx.Repo.PullRequest.SameRepo = true | |||
| ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName | |||
| } | |||
| ctx.Data["BaseRepo"] = repo | |||
| ctx.Repo.PullRequest.BaseRepo = repo | |||
| ctx.Repo.PullRequest.Allowed = true | |||
| ctx.Repo.PullRequest.SameRepo = true | |||
| ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName | |||
| } | |||
| } | |||
| ctx.Data["PullRequestCtx"] = ctx.Repo.PullRequest | |||
| @@ -50,12 +50,12 @@ func (b *Blob) GetBlobContentBase64() (string, error) { | |||
| go func() { | |||
| _, err := io.Copy(encoder, dataRc) | |||
| encoder.Close() | |||
| _ = encoder.Close() | |||
| if err != nil { | |||
| pw.CloseWithError(err) | |||
| _ = pw.CloseWithError(err) | |||
| } else { | |||
| pw.Close() | |||
| _ = pw.Close() | |||
| } | |||
| }() | |||
| @@ -133,7 +133,7 @@ func (c *Commit) ParentCount() int { | |||
| func isImageFile(data []byte) (string, bool) { | |||
| contentType := http.DetectContentType(data) | |||
| if strings.Index(contentType, "image/") != -1 { | |||
| if strings.Contains(contentType, "image/") { | |||
| return contentType, true | |||
| } | |||
| return contentType, false | |||
| @@ -206,8 +206,7 @@ func CommitChanges(repoPath string, opts CommitChangesOptions) error { | |||
| } | |||
| func commitsCount(repoPath, revision, relpath string) (int64, error) { | |||
| var cmd *Command | |||
| cmd = NewCommand("rev-list", "--count") | |||
| cmd := NewCommand("rev-list", "--count") | |||
| cmd.AddArguments(revision) | |||
| if len(relpath) > 0 { | |||
| cmd.AddArguments("--", relpath) | |||
| @@ -263,7 +262,7 @@ type SearchCommitsOptions struct { | |||
| All bool | |||
| } | |||
| // NewSearchCommitsOptions contruct a SearchCommitsOption from a space-delimited search string | |||
| // NewSearchCommitsOptions construct a SearchCommitsOption from a space-delimited search string | |||
| func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommitsOptions { | |||
| var keywords, authors, committers []string | |||
| var after, before string | |||
| @@ -87,16 +87,6 @@ func getCommitTree(c *object.Commit, treePath string) (*object.Tree, error) { | |||
| return tree, nil | |||
| } | |||
| func getFullPath(treePath, path string) string { | |||
| if treePath != "" { | |||
| if path != "" { | |||
| return treePath + "/" + path | |||
| } | |||
| return treePath | |||
| } | |||
| return path | |||
| } | |||
| func getFileHashes(c *object.Commit, treePath string, paths []string) (map[string]plumbing.Hash, error) { | |||
| tree, err := getCommitTree(c, treePath) | |||
| if err == object.ErrDirectoryNotFound { | |||
| @@ -58,21 +58,21 @@ func (repo *Repository) parsePrettyFormatLogToList(logs []byte) (*list.List, err | |||
| // IsRepoURLAccessible checks if given repository URL is accessible. | |||
| func IsRepoURLAccessible(url string) bool { | |||
| _, err := NewCommand("ls-remote", "-q", "-h", url, "HEAD").Run() | |||
| if err != nil { | |||
| return false | |||
| } | |||
| return true | |||
| return err == nil | |||
| } | |||
| // InitRepository initializes a new Git repository. | |||
| func InitRepository(repoPath string, bare bool) error { | |||
| os.MkdirAll(repoPath, os.ModePerm) | |||
| err := os.MkdirAll(repoPath, os.ModePerm) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| cmd := NewCommand("init") | |||
| if bare { | |||
| cmd.AddArguments("--bare") | |||
| } | |||
| _, err := cmd.RunInDir(repoPath) | |||
| _, err = cmd.RunInDir(repoPath) | |||
| return err | |||
| } | |||
| @@ -29,10 +29,7 @@ func IsBranchExist(repoPath, name string) bool { | |||
| // IsBranchExist returns true if given branch exists in current repository. | |||
| func (repo *Repository) IsBranchExist(name string) bool { | |||
| _, err := repo.gogitRepo.Reference(plumbing.ReferenceName(BranchPrefix+name), true) | |||
| if err != nil { | |||
| return false | |||
| } | |||
| return true | |||
| return err == nil | |||
| } | |||
| // Branch represents a Git branch. | |||
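Editor's note: IsRepoURLAccessible, IsBranchExist, IsCommitExist and IsTagExist all get the same gosimple (S1008) rewrite — an `if err != nil { return false }; return true` tail becomes a single boolean expression with identical behaviour. A minimal standalone example:

    package main

    import (
    	"fmt"
    	"os"
    )

    // fileExists shows the S1008 simplification from the diff: the
    // if/return-false/return-true tail collapses to one expression.
    func fileExists(path string) bool {
    	_, err := os.Stat(path)
    	return err == nil
    }

    func main() {
    	fmt.Println(fileExists("go.mod"))
    }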
| @@ -77,7 +74,7 @@ func (repo *Repository) GetBranches() ([]string, error) { | |||
| return nil, err | |||
| } | |||
| branches.ForEach(func(branch *plumbing.Reference) error { | |||
| _ = branches.ForEach(func(branch *plumbing.Reference) error { | |||
| branchNames = append(branchNames, strings.TrimPrefix(branch.Name().String(), BranchPrefix)) | |||
| return nil | |||
| }) | |||
| @@ -31,10 +31,7 @@ func (repo *Repository) GetRefCommitID(name string) (string, error) { | |||
| func (repo *Repository) IsCommitExist(name string) bool { | |||
| hash := plumbing.NewHash(name) | |||
| _, err := repo.gogitRepo.CommitObject(hash) | |||
| if err != nil { | |||
| return false | |||
| } | |||
| return true | |||
| return err == nil | |||
| } | |||
| // GetBranchCommitID returns last commit ID string of given branch. | |||
| @@ -13,6 +13,8 @@ import ( | |||
| "strconv" | |||
| "strings" | |||
| "time" | |||
| logger "code.gitea.io/gitea/modules/log" | |||
| ) | |||
| // CompareInfo represents needed information for comparing references. | |||
| @@ -55,7 +57,11 @@ func (repo *Repository) GetCompareInfo(basePath, baseBranch, headBranch string) | |||
| if err = repo.AddRemote(tmpRemote, basePath, true); err != nil { | |||
| return nil, fmt.Errorf("AddRemote: %v", err) | |||
| } | |||
| defer repo.RemoveRemote(tmpRemote) | |||
| defer func() { | |||
| if err := repo.RemoveRemote(tmpRemote); err != nil { | |||
| logger.Error("GetPullRequestInfo: RemoveRemote: %v", err) | |||
| } | |||
| }() | |||
| } | |||
| compareInfo := new(CompareInfo) | |||
| @@ -24,10 +24,7 @@ func IsTagExist(repoPath, name string) bool { | |||
| // IsTagExist returns true if given tag exists in the repository. | |||
| func (repo *Repository) IsTagExist(name string) bool { | |||
| _, err := repo.gogitRepo.Reference(plumbing.ReferenceName(TagPrefix+name), true) | |||
| if err != nil { | |||
| return false | |||
| } | |||
| return true | |||
| return err == nil | |||
| } | |||
| // CreateTag create one tag in the repository | |||
| @@ -221,7 +218,7 @@ func (repo *Repository) GetTags() ([]string, error) { | |||
| return nil, err | |||
| } | |||
| tags.ForEach(func(tag *plumbing.Reference) error { | |||
| _ = tags.ForEach(func(tag *plumbing.Reference) error { | |||
| tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix)) | |||
| return nil | |||
| }) | |||
| @@ -7,7 +7,6 @@ package git | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "path/filepath" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
| @@ -75,13 +74,6 @@ func concatenateError(err error, stderr string) error { | |||
| return fmt.Errorf("%v - %s", err, stderr) | |||
| } | |||
| // If the object is stored in its own file (i.e not in a pack file), | |||
| // this function returns the full path to the object file. | |||
| // It does not test if the file exists. | |||
| func filepathFromSHA1(rootdir, sha1 string) string { | |||
| return filepath.Join(rootdir, "objects", sha1[:2], sha1[2:]) | |||
| } | |||
| // RefEndName return the end name of a ref name | |||
| func RefEndName(refStr string) string { | |||
| if strings.HasPrefix(refStr, BranchPrefix) { | |||
| @@ -74,7 +74,6 @@ func (wp *WriterPool) Put(w *gzip.Writer) { | |||
| } | |||
| var writerPool WriterPool | |||
| var regex regexp.Regexp | |||
| // Options represents the configuration for the gzip middleware | |||
| type Options struct { | |||
| @@ -116,7 +115,7 @@ func Middleware(options ...Options) macaron.Handler { | |||
| if rangeHdr := ctx.Req.Header.Get(rangeHeader); rangeHdr != "" { | |||
| match := regex.FindStringSubmatch(rangeHdr) | |||
| if match != nil && len(match) > 1 { | |||
| if len(match) > 1 { | |||
| return | |||
| } | |||
| } | |||
| @@ -270,9 +269,8 @@ func (proxy *ProxyResponseWriter) Close() error { | |||
| if proxy.writer == nil { | |||
| err := proxy.startPlain() | |||
| if err != nil { | |||
| err = fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error()) | |||
| return fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error()) | |||
| } | |||
| } | |||
| @@ -263,7 +263,7 @@ func (r *Request) getResponse() (*http.Response, error) { | |||
| } | |||
| if r.req.Method == "GET" && len(paramBody) > 0 { | |||
| if strings.Index(r.url, "?") != -1 { | |||
| if strings.Contains(r.url, "?") { | |||
| r.url += "&" + paramBody | |||
| } else { | |||
| r.url = r.url + "?" + paramBody | |||
| @@ -290,10 +290,13 @@ func (r *Request) getResponse() (*http.Response, error) { | |||
| } | |||
| } | |||
| for k, v := range r.params { | |||
| bodyWriter.WriteField(k, v) | |||
| err := bodyWriter.WriteField(k, v) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| } | |||
| bodyWriter.Close() | |||
| pw.Close() | |||
| _ = bodyWriter.Close() | |||
| _ = pw.Close() | |||
| }() | |||
| r.Header("Content-Type", bodyWriter.FormDataContentType()) | |||
| r.req.Body = ioutil.NopCloser(pr) | |||
| @@ -323,18 +326,15 @@ func (r *Request) getResponse() (*http.Response, error) { | |||
| Proxy: proxy, | |||
| Dial: TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout), | |||
| } | |||
| } else { | |||
| // if r.transport is *http.Transport then set the settings. | |||
| if t, ok := trans.(*http.Transport); ok { | |||
| if t.TLSClientConfig == nil { | |||
| t.TLSClientConfig = r.setting.TLSClientConfig | |||
| } | |||
| if t.Proxy == nil { | |||
| t.Proxy = r.setting.Proxy | |||
| } | |||
| if t.Dial == nil { | |||
| t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout) | |||
| } | |||
| } else if t, ok := trans.(*http.Transport); ok { | |||
| if t.TLSClientConfig == nil { | |||
| t.TLSClientConfig = r.setting.TLSClientConfig | |||
| } | |||
| if t.Proxy == nil { | |||
| t.Proxy = r.setting.Proxy | |||
| } | |||
| if t.Dial == nil { | |||
| t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout) | |||
| } | |||
| } | |||
| @@ -461,7 +461,6 @@ func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, ad | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| conn.SetDeadline(time.Now().Add(rwTimeout)) | |||
| return conn, nil | |||
| return conn, conn.SetDeadline(time.Now().Add(rwTimeout)) | |||
| } | |||
| } | |||
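Editor's note: both the webhook delivery dialer earlier in the diff and httplib's TimeoutDialer previously ignored the error from conn.SetDeadline. Returning it as the dialer's own error value handles both spots with one line. A sketch under those assumptions:

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    // timeoutDialer mirrors the diff: instead of calling SetDeadline and then
    // returning nil, the deadline error becomes the returned error.
    func timeoutDialer(connectTimeout, rwTimeout time.Duration) func(network, addr string) (net.Conn, error) {
    	return func(network, addr string) (net.Conn, error) {
    		conn, err := net.DialTimeout(network, addr, connectTimeout)
    		if err != nil {
    			return nil, err
    		}
    		return conn, conn.SetDeadline(time.Now().Add(rwTimeout))
    	}
    }

    func main() {
    	dial := timeoutDialer(2*time.Second, 5*time.Second)
    	if conn, err := dial("tcp", "example.com:80"); err == nil {
    		fmt.Println("connected:", conn.RemoteAddr())
    		_ = conn.Close()
    	} else {
    		fmt.Println("dial failed:", err)
    	}
    }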
| @@ -5,7 +5,6 @@ | |||
| package indexer | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "strconv" | |||
| @@ -24,15 +23,6 @@ func indexerID(id int64) string { | |||
| return strconv.FormatInt(id, 36) | |||
| } | |||
| // idOfIndexerID the integer id associated with an indexer id | |||
| func idOfIndexerID(indexerID string) (int64, error) { | |||
| id, err := strconv.ParseInt(indexerID, 36, 64) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("Unexpected indexer ID %s: %v", indexerID, err) | |||
| } | |||
| return id, nil | |||
| } | |||
| // numericEqualityQuery a numeric equality query for the given value and field | |||
| func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery { | |||
| f := float64(value) | |||
| @@ -42,13 +32,6 @@ func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery { | |||
| return q | |||
| } | |||
| func newMatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery { | |||
| q := bleve.NewMatchPhraseQuery(matchPhrase) | |||
| q.FieldVal = field | |||
| q.Analyzer = analyzer | |||
| return q | |||
| } | |||
| const unicodeNormalizeName = "unicodeNormalize" | |||
| func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error { | |||
| @@ -101,7 +101,12 @@ func InitIssueIndexer(syncReindex bool) error { | |||
| return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueQueueType) | |||
| } | |||
| go issueIndexerQueue.Run() | |||
| go func() { | |||
| err = issueIndexerQueue.Run() | |||
| if err != nil { | |||
| log.Error("issueIndexerQueue.Run: %v", err) | |||
| } | |||
| }() | |||
| if populate { | |||
| if syncReindex { | |||
| @@ -161,7 +166,7 @@ func UpdateIssueIndexer(issue *models.Issue) { | |||
| comments = append(comments, comment.Content) | |||
| } | |||
| } | |||
| issueIndexerQueue.Push(&IndexerData{ | |||
| _ = issueIndexerQueue.Push(&IndexerData{ | |||
| ID: issue.ID, | |||
| RepoID: issue.RepoID, | |||
| Title: issue.Title, | |||
| @@ -179,11 +184,11 @@ func DeleteRepoIssueIndexer(repo *models.Repository) { | |||
| return | |||
| } | |||
| if len(ids) <= 0 { | |||
| if len(ids) == 0 { | |||
| return | |||
| } | |||
| issueIndexerQueue.Push(&IndexerData{ | |||
| _ = issueIndexerQueue.Push(&IndexerData{ | |||
| IDs: ids, | |||
| IsDelete: true, | |||
| }) | |||
| @@ -34,20 +34,20 @@ func (c *ChannelQueue) Run() error { | |||
| select { | |||
| case data := <-c.queue: | |||
| if data.IsDelete { | |||
| c.indexer.Delete(data.IDs...) | |||
| _ = c.indexer.Delete(data.IDs...) | |||
| continue | |||
| } | |||
| datas = append(datas, data) | |||
| if len(datas) >= c.batchNumber { | |||
| c.indexer.Index(datas) | |||
| _ = c.indexer.Index(datas) | |||
| // TODO: save the point | |||
| datas = make([]*IndexerData, 0, c.batchNumber) | |||
| } | |||
| case <-time.After(time.Millisecond * 100): | |||
| i++ | |||
| if i >= 3 && len(datas) > 0 { | |||
| c.indexer.Index(datas) | |||
| _ = c.indexer.Index(datas) | |||
| // TODO: save the point | |||
| datas = make([]*IndexerData, 0, c.batchNumber) | |||
| } | |||
| @@ -44,7 +44,7 @@ func (l *LevelQueue) Run() error { | |||
| for { | |||
| i++ | |||
| if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) { | |||
| l.indexer.Index(datas) | |||
| _ = l.indexer.Index(datas) | |||
| datas = make([]*IndexerData, 0, l.batchNumber) | |||
| i = 0 | |||
| continue | |||
| @@ -59,7 +59,7 @@ func (l *LevelQueue) Run() error { | |||
| continue | |||
| } | |||
| if len(bs) <= 0 { | |||
| if len(bs) == 0 { | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| @@ -96,12 +96,12 @@ func (r *RedisQueue) Run() error { | |||
| i++ | |||
| if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) { | |||
| r.indexer.Index(datas) | |||
| _ = r.indexer.Index(datas) | |||
| datas = make([]*IndexerData, 0, r.batchNumber) | |||
| i = 0 | |||
| } | |||
| if len(bs) <= 0 { | |||
| if len(bs) == 0 { | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| @@ -17,7 +17,7 @@ import ( | |||
| ) | |||
| // checkIsValidRequest checks whether this is a valid request; on a bad request it writes the response to ctx. | |||
| func checkIsValidRequest(ctx *context.Context, post bool) bool { | |||
| func checkIsValidRequest(ctx *context.Context) bool { | |||
| if !setting.LFS.StartServer { | |||
| writeStatus(ctx, 404) | |||
| return false | |||
| @@ -35,13 +35,6 @@ func checkIsValidRequest(ctx *context.Context, post bool) bool { | |||
| } | |||
| ctx.User = user | |||
| } | |||
| if post { | |||
| mediaParts := strings.Split(ctx.Req.Header.Get("Content-Type"), ";") | |||
| if mediaParts[0] != metaMediaType { | |||
| writeStatus(ctx, 400) | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| @@ -71,7 +64,7 @@ func handleLockListOut(ctx *context.Context, repo *models.Repository, lock *mode | |||
| // GetListLockHandler list locks | |||
| func GetListLockHandler(ctx *context.Context) { | |||
| if !checkIsValidRequest(ctx, false) { | |||
| if !checkIsValidRequest(ctx) { | |||
| return | |||
| } | |||
| ctx.Resp.Header().Set("Content-Type", metaMediaType) | |||
| @@ -135,7 +128,7 @@ func GetListLockHandler(ctx *context.Context) { | |||
| // PostLockHandler create lock | |||
| func PostLockHandler(ctx *context.Context) { | |||
| if !checkIsValidRequest(ctx, false) { | |||
| if !checkIsValidRequest(ctx) { | |||
| return | |||
| } | |||
| ctx.Resp.Header().Set("Content-Type", metaMediaType) | |||
| @@ -198,7 +191,7 @@ func PostLockHandler(ctx *context.Context) { | |||
| // VerifyLockHandler list locks for verification | |||
| func VerifyLockHandler(ctx *context.Context) { | |||
| if !checkIsValidRequest(ctx, false) { | |||
| if !checkIsValidRequest(ctx) { | |||
| return | |||
| } | |||
| ctx.Resp.Header().Set("Content-Type", metaMediaType) | |||
| @@ -249,7 +242,7 @@ func VerifyLockHandler(ctx *context.Context) { | |||
| // UnLockHandler delete locks | |||
| func UnLockHandler(ctx *context.Context) { | |||
| if !checkIsValidRequest(ctx, false) { | |||
| if !checkIsValidRequest(ctx) { | |||
| return | |||
| } | |||
| ctx.Resp.Header().Set("Content-Type", metaMediaType) | |||
| @@ -152,7 +152,7 @@ func getContentHandler(ctx *context.Context) { | |||
| if rangeHdr := ctx.Req.Header.Get("Range"); rangeHdr != "" { | |||
| regex := regexp.MustCompile(`bytes=(\d+)\-.*`) | |||
| match := regex.FindStringSubmatch(rangeHdr) | |||
| if match != nil && len(match) > 1 { | |||
| if len(match) > 1 { | |||
| statusCode = 206 | |||
| fromByte, _ = strconv.ParseInt(match[1], 10, 32) | |||
| ctx.Resp.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, meta.Size-1, meta.Size-fromByte)) | |||
| @@ -178,8 +178,8 @@ func getContentHandler(ctx *context.Context) { | |||
| } | |||
| ctx.Resp.WriteHeader(statusCode) | |||
| io.Copy(ctx.Resp, content) | |||
| content.Close() | |||
| _, _ = io.Copy(ctx.Resp, content) | |||
| _ = content.Close() | |||
| logRequest(ctx.Req, statusCode) | |||
| } | |||
| @@ -196,7 +196,7 @@ func getMetaHandler(ctx *context.Context) { | |||
| if ctx.Req.Method == "GET" { | |||
| enc := json.NewEncoder(ctx.Resp) | |||
| enc.Encode(Represent(rv, meta, true, false)) | |||
| _ = enc.Encode(Represent(rv, meta, true, false)) | |||
| } | |||
| logRequest(ctx.Req, 200) | |||
| @@ -249,7 +249,7 @@ func PostHandler(ctx *context.Context) { | |||
| ctx.Resp.WriteHeader(sentStatus) | |||
| enc := json.NewEncoder(ctx.Resp) | |||
| enc.Encode(Represent(rv, meta, meta.Existing, true)) | |||
| _ = enc.Encode(Represent(rv, meta, meta.Existing, true)) | |||
| logRequest(ctx.Req, sentStatus) | |||
| } | |||
| @@ -313,7 +313,7 @@ func BatchHandler(ctx *context.Context) { | |||
| respobj := &BatchResponse{Objects: responseObjects} | |||
| enc := json.NewEncoder(ctx.Resp) | |||
| enc.Encode(respobj) | |||
| _ = enc.Encode(respobj) | |||
| logRequest(ctx.Req, 200) | |||
| } | |||
| @@ -208,7 +208,7 @@ normalLoop: | |||
| if i > lasti { | |||
| written, err := c.w.Write(bytes[lasti:i]) | |||
| totalWritten = totalWritten + written | |||
| totalWritten += written | |||
| if err != nil { | |||
| return totalWritten, err | |||
| } | |||
| @@ -243,7 +243,7 @@ normalLoop: | |||
| if bytes[j] == 'm' { | |||
| if c.mode == allowColor { | |||
| written, err := c.w.Write(bytes[i : j+1]) | |||
| totalWritten = totalWritten + written | |||
| totalWritten += written | |||
| if err != nil { | |||
| return totalWritten, err | |||
| } | |||
| @@ -278,7 +278,7 @@ func ColorSprintf(format string, args ...interface{}) string { | |||
| } | |||
| return fmt.Sprintf(format, v...) | |||
| } | |||
| return fmt.Sprintf(format) | |||
| return format | |||
| } | |||
| // ColorFprintf will write to the provided writer similar to ColorSprintf | |||
| @@ -290,7 +290,7 @@ func ColorFprintf(w io.Writer, format string, args ...interface{}) (int, error) | |||
| } | |||
| return fmt.Fprintf(w, format, v...) | |||
| } | |||
| return fmt.Fprintf(w, format) | |||
| return fmt.Fprint(w, format) | |||
| } | |||
| // ColorFormatted structs provide their own colored string when formatted with ColorSprintf | |||
| @@ -67,7 +67,10 @@ func (i *connWriter) connect() error { | |||
| } | |||
| if tcpConn, ok := conn.(*net.TCPConn); ok { | |||
| tcpConn.SetKeepAlive(true) | |||
| err = tcpConn.SetKeepAlive(true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| i.innerWriter = conn | |||
| @@ -24,7 +24,6 @@ func listenReadAndClose(t *testing.T, l net.Listener, expected string) { | |||
| assert.NoError(t, err) | |||
| assert.Equal(t, expected, string(written)) | |||
| return | |||
| } | |||
| func TestConnLogger(t *testing.T) { | |||
| @@ -79,7 +79,7 @@ func (l *ChannelledLog) Start() { | |||
| return | |||
| } | |||
| l.loggerProvider.Flush() | |||
| case _, _ = <-l.close: | |||
| case <-l.close: | |||
| l.closeLogger() | |||
| return | |||
| } | |||
| @@ -104,7 +104,6 @@ func (l *ChannelledLog) closeLogger() { | |||
| l.loggerProvider.Flush() | |||
| l.loggerProvider.Close() | |||
| l.closed <- true | |||
| return | |||
| } | |||
| // Close this ChannelledLog | |||
| @@ -228,7 +227,6 @@ func (m *MultiChannelledLog) closeLoggers() { | |||
| } | |||
| m.mutex.Unlock() | |||
| m.closed <- true | |||
| return | |||
| } | |||
| // Start processing the MultiChannelledLog | |||
| @@ -223,7 +223,7 @@ func compressOldLogFile(fname string, compressionLevel int) error { | |||
| func (log *FileLogger) deleteOldLog() { | |||
| dir := filepath.Dir(log.Filename) | |||
| filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { | |||
| _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { | |||
| defer func() { | |||
| if r := recover(); r != nil { | |||
| returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r) | |||
| @@ -246,7 +246,7 @@ func (log *FileLogger) deleteOldLog() { | |||
| // there are no buffering messages in file logger in memory. | |||
| // flush file means sync file from disk. | |||
| func (log *FileLogger) Flush() { | |||
| log.mw.fd.Sync() | |||
| _ = log.mw.fd.Sync() | |||
| } | |||
| // GetName returns the default name for this implementation | |||
| @@ -103,7 +103,7 @@ func TestFileLogger(t *testing.T) { | |||
| assert.Equal(t, expected, string(logData)) | |||
| event.level = WARN | |||
| expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| fileLogger.LogEvent(&event) | |||
| fileLogger.Flush() | |||
| logData, err = ioutil.ReadFile(filename) | |||
| @@ -130,7 +130,7 @@ func TestFileLogger(t *testing.T) { | |||
| err = realFileLogger.DoRotate() | |||
| assert.Error(t, err) | |||
| expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| fileLogger.LogEvent(&event) | |||
| fileLogger.Flush() | |||
| logData, err = ioutil.ReadFile(filename) | |||
| @@ -138,7 +138,7 @@ func TestFileLogger(t *testing.T) { | |||
| assert.Equal(t, expected, string(logData)) | |||
| // Should fail to rotate | |||
| expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| fileLogger.LogEvent(&event) | |||
| fileLogger.Flush() | |||
| logData, err = ioutil.ReadFile(filename) | |||
| @@ -188,7 +188,7 @@ func TestCompressFileLogger(t *testing.T) { | |||
| assert.Equal(t, expected, string(logData)) | |||
| event.level = WARN | |||
| expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||
| fileLogger.LogEvent(&event) | |||
| fileLogger.Flush() | |||
| logData, err = ioutil.ReadFile(filename) | |||
| @@ -57,7 +57,7 @@ func FlagsFromString(from string) int { | |||
| for _, flag := range strings.Split(strings.ToLower(from), ",") { | |||
| f, ok := flagFromString[strings.TrimSpace(flag)] | |||
| if ok { | |||
| flags = flags | f | |||
| flags |= f | |||
| } | |||
| } | |||
| return flags | |||
| @@ -218,7 +218,7 @@ func (l *LoggerAsWriter) Write(p []byte) (int, error) { | |||
| func (l *LoggerAsWriter) Log(msg string) { | |||
| for _, logger := range l.ourLoggers { | |||
| // Set the skip to reference the call just above this | |||
| logger.Log(1, l.level, msg) | |||
| _ = logger.Log(1, l.level, msg) | |||
| } | |||
| } | |||
| @@ -11,10 +11,6 @@ import ( | |||
| "strings" | |||
| ) | |||
| const ( | |||
| subjectPhrase = "Diagnostic message from server" | |||
| ) | |||
| type smtpWriter struct { | |||
| owner *SMTPLogger | |||
| } | |||
| @@ -252,10 +252,7 @@ func (logger *WriterLogger) Match(event *Event) bool { | |||
| mode: removeColor, | |||
| }).Write([]byte(event.msg)) | |||
| msg = baw | |||
| if logger.regexp.Match(msg) { | |||
| return true | |||
| } | |||
| return false | |||
| return logger.regexp.Match(msg) | |||
| } | |||
| // Close the base logger | |||
| @@ -258,15 +258,12 @@ func (s *dummySender) Send(from string, to []string, msg io.WriterTo) error { | |||
| } | |||
| func processMailQueue() { | |||
| for { | |||
| select { | |||
| case msg := <-mailQueue: | |||
| log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info) | |||
| if err := gomail.Send(Sender, msg.Message); err != nil { | |||
| log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err) | |||
| } else { | |||
| log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info) | |||
| } | |||
| for msg := range mailQueue { | |||
| log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info) | |||
| if err := gomail.Send(Sender, msg.Message); err != nil { | |||
| log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err) | |||
| } else { | |||
| log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info) | |||
| } | |||
| } | |||
| } | |||
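Editor's note: a `for { select { case v := <-ch: ... } }` loop with a single case is just a receive loop; gosimple (S1000) rewrites it as `for v := range ch`, which also terminates cleanly once the channel is closed and drained. The notification service hunk below gets the same treatment. A short illustration:

    package main

    import "fmt"

    func main() {
    	queue := make(chan string, 3)
    	queue <- "a"
    	queue <- "b"
    	queue <- "c"
    	close(queue)

    	// Before: for { select { case msg := <-queue: ... } } with one case.
    	// Ranging over the channel is equivalent and ends when it is closed.
    	for msg := range queue {
    		fmt.Println("processing", msg)
    	}
    }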
| @@ -108,24 +108,6 @@ func FindAllMentions(content string) []string { | |||
| return ret | |||
| } | |||
| // cutoutVerbosePrefix cutouts URL prefix including sub-path to | |||
| // return a clean unified string of request URL path. | |||
| func cutoutVerbosePrefix(prefix string) string { | |||
| if len(prefix) == 0 || prefix[0] != '/' { | |||
| return prefix | |||
| } | |||
| count := 0 | |||
| for i := 0; i < len(prefix); i++ { | |||
| if prefix[i] == '/' { | |||
| count++ | |||
| } | |||
| if count >= 3+setting.AppSubURLDepth { | |||
| return prefix[:i] | |||
| } | |||
| } | |||
| return prefix | |||
| } | |||
| // IsSameDomain checks if given url string has the same hostname as current Gitea instance | |||
| func IsSameDomain(s string) bool { | |||
| if strings.HasPrefix(s, "/") { | |||
| @@ -146,7 +128,7 @@ type postProcessError struct { | |||
| } | |||
| func (p *postProcessError) Error() string { | |||
| return "PostProcess: " + p.context + ", " + p.Error() | |||
| return "PostProcess: " + p.context + ", " + p.err.Error() | |||
| } | |||
| type processor func(ctx *postProcessCtx, node *html.Node) | |||
| @@ -304,20 +286,6 @@ func (ctx *postProcessCtx) visitNode(node *html.Node) { | |||
| // ignore everything else | |||
| } | |||
| func (ctx *postProcessCtx) visitNodeForShortLinks(node *html.Node) { | |||
| switch node.Type { | |||
| case html.TextNode: | |||
| shortLinkProcessorFull(ctx, node, true) | |||
| case html.ElementNode: | |||
| if node.Data == "code" || node.Data == "pre" || node.Data == "a" { | |||
| return | |||
| } | |||
| for n := node.FirstChild; n != nil; n = n.NextSibling { | |||
| ctx.visitNodeForShortLinks(n) | |||
| } | |||
| } | |||
| } | |||
| // textNode runs the passed node through various processors, in order to handle | |||
| // all kinds of special links handled by the post-processing. | |||
| func (ctx *postProcessCtx) textNode(node *html.Node) { | |||
| @@ -29,11 +29,6 @@ func numericIssueLink(baseURL string, index int) string { | |||
| return link(util.URLJoin(baseURL, strconv.Itoa(index)), fmt.Sprintf("#%d", index)) | |||
| } | |||
| // urlContentsLink an HTML link whose contents is the target URL | |||
| func urlContentsLink(href string) string { | |||
| return link(href, href) | |||
| } | |||
| // link an HTML link | |||
| func link(href, contents string) string { | |||
| return fmt.Sprintf("<a href=\"%s\">%s</a>", href, contents) | |||
| @@ -35,12 +35,9 @@ func NewNotifier() base.Notifier { | |||
| } | |||
| func (ns *notificationService) Run() { | |||
| for { | |||
| select { | |||
| case opts := <-ns.issueQueue: | |||
| if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil { | |||
| log.Error("Was unable to create issue notification: %v", err) | |||
| } | |||
| for opts := range ns.issueQueue { | |||
| if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil { | |||
| log.Error("Was unable to create issue notification: %v", err) | |||
| } | |||
| } | |||
| } | |||