chore: golang lint

Jason Song 2022-11-26 20:14:03 +08:00
parent 3b119f3d78
commit 0bd9553219
No known key found for this signature in database
GPG Key ID: 8402EEEE4511A8B5
11 changed files with 93 additions and 107 deletions

View File

@@ -6,7 +6,6 @@ package bots
import (
"context"
"encoding/json"
"fmt"
"time"
@@ -14,6 +13,7 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/models/webhook"
"code.gitea.io/gitea/modules/json"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"

View File

@@ -52,10 +52,7 @@ func (jobs RunJobList) LoadRuns(ctx context.Context, withRepo bool) error {
}
func (jobs RunJobList) LoadAttributes(ctx context.Context, withRepo bool) error {
if err := jobs.LoadRuns(ctx, withRepo); err != nil {
return err
}
return nil
return jobs.LoadRuns(ctx, withRepo)
}
type FindRunJobOptions struct {
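Both this hunk and the matching one in TaskList.LoadAttributes below apply the same simplification: a wrapper that only forwards another call's error does not need the if err != nil { return err } / return nil pair. A generic sketch of the rule, with hypothetical names (the load callback stands in for the real model call):

package sketch

import "context"

// loadBefore shows the flagged shape: the error is returned exactly as
// received, so the branch adds nothing over the call itself.
func loadBefore(ctx context.Context, load func(context.Context) error) error {
	if err := load(ctx); err != nil {
		return err
	}
	return nil
}

// loadAfter is the collapsed form with identical behaviour. The long form is
// only worth keeping when the error gets wrapped or extra work happens
// between the call and the return.
func loadAfter(ctx context.Context, load func(context.Context) error) error {
	return load(ctx)
}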

View File

@@ -317,7 +317,8 @@ func CreateTaskForRunner(ctx context.Context, runner *Runner) (*Task, bool, erro
// TODO: a more efficient way to filter labels
var job *RunJob
labels := append(runner.AgentLabels, runner.CustomLabels...)
labels := runner.AgentLabels
labels = append(labels, runner.CustomLabels...)
log.Trace("runner labels: %v", labels)
for _, v := range jobs {
if isSubset(labels, v.RunsOn) {
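The replaced one-liner is the shape linters warn about: append(a, b...) assigned to a new variable can silently share a's backing array, so the rewrite keeps the behaviour but makes the reuse of runner.AgentLabels explicit at the assignment site. A minimal, self-contained sketch of the aliasing hazard the warning is about (slice contents here are illustrative, not from this codebase):

package main

import "fmt"

func main() {
	// A slice with spare capacity, like one that has grown before.
	agent := make([]string, 2, 4)
	agent[0], agent[1] = "linux", "amd64"

	// Flagged shape: labels aliases agent's backing array, because the
	// append fits within cap(agent) and no new array is allocated.
	labels := append(agent, "docker")

	// A later append through the original slice writes into the same slot.
	agent = append(agent, "self-hosted")

	fmt.Println(labels) // [linux amd64 self-hosted]: "docker" was overwritten
}

Note that the two-line form in the diff does not copy anything either; it only makes the relationship between the two slices visible at the point where it is created.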

View File

@@ -50,10 +50,7 @@ func (tasks TaskList) LoadJobs(ctx context.Context) error {
}
func (tasks TaskList) LoadAttributes(ctx context.Context) error {
if err := tasks.LoadJobs(ctx); err != nil {
return err
}
return nil
return tasks.LoadJobs(ctx)
}
type FindTaskOptions struct {

View File

@@ -87,7 +87,7 @@ func (f *file) Read(p []byte) (n int, err error) {
return 0, os.ErrInvalid
}
fileMeta, err := findFileMetaById(f.ctx, f.metaID)
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
@@ -101,7 +101,7 @@ func (f *file) Write(p []byte) (n int, err error) {
return 0, os.ErrInvalid
}
fileMeta, err := findFileMetaById(f.ctx, f.metaID)
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
@@ -198,10 +198,6 @@ func timeToFileTimestamp(t time.Time) int64 {
return t.UnixMicro()
}
func fileTimestampToTime(t int64) time.Time {
return time.UnixMicro(t)
}
func (f *file) loadMetaByPath() (*FileMeta, error) {
var fileMeta FileMeta
if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
@@ -328,14 +324,14 @@ func (f *file) size() (int64, error) {
if f.metaID == 0 {
return 0, os.ErrNotExist
}
fileMeta, err := findFileMetaById(f.ctx, f.metaID)
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
return fileMeta.FileSize, nil
}
func findFileMetaById(ctx context.Context, metaID int64) (*FileMeta, error) {
func findFileMetaByID(ctx context.Context, metaID int64) (*FileMeta, error) {
var fileMeta FileMeta
if ok, err := db.GetEngine(ctx).Where("id = ?", metaID).Get(&fileMeta); err != nil {
return nil, err

View File

@@ -135,16 +135,14 @@ func FormatLog(timestamp time.Time, content string) string {
return fmt.Sprintf("%s %s", timestamp.UTC().Format(timeFormat), content)
}
func ParseLog(in string) (timestamp time.Time, content string, err error) {
func ParseLog(in string) (time.Time, string, error) {
index := strings.IndexRune(in, ' ')
if index < 0 {
err = fmt.Errorf("invalid log: %q", in)
return
return time.Time{}, "", fmt.Errorf("invalid log: %q", in)
}
timestamp, err = time.Parse(timeFormat, in[:index])
timestamp, err := time.Parse(timeFormat, in[:index])
if err != nil {
return
return time.Time{}, "", err
}
content = in[index+1:]
return
return timestamp, in[index+1:], nil
}
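The named results and bare returns are swapped for explicit returns, which are easier to audit for half-initialised values. A rough, self-contained round trip of the two helpers as they read after this change (timeFormat is not shown in this hunk, so time.RFC3339Nano below is only an assumption):

package main

import (
	"fmt"
	"strings"
	"time"
)

// timeFormat is assumed for this sketch; the real constant is defined
// elsewhere in the package.
const timeFormat = time.RFC3339Nano

func FormatLog(timestamp time.Time, content string) string {
	return fmt.Sprintf("%s %s", timestamp.UTC().Format(timeFormat), content)
}

func ParseLog(in string) (time.Time, string, error) {
	index := strings.IndexRune(in, ' ')
	if index < 0 {
		return time.Time{}, "", fmt.Errorf("invalid log: %q", in)
	}
	timestamp, err := time.Parse(timeFormat, in[:index])
	if err != nil {
		return time.Time{}, "", err
	}
	return timestamp, in[index+1:], nil
}

func main() {
	line := FormatLog(time.Now(), "starting step: checkout")
	ts, content, err := ParseLog(line)
	fmt.Println(ts.UTC(), content, err) // the timestamp and content round-trip; err is nil
}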

View File

@@ -6,7 +6,6 @@ package bots
import (
"context"
"encoding/json"
"fmt"
bots_model "code.gitea.io/gitea/models/bots"
@@ -23,6 +22,7 @@ import (
"code.gitea.io/gitea/modules/convert"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/graceful"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification/base"
"code.gitea.io/gitea/modules/process"
@@ -409,9 +409,6 @@ func (*botsNotifier) NotifyMergePullRequest(ctx context.Context, doer *user_mode
}
func (a *botsNotifier) NotifyPushCommits(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, opts *repository.PushUpdateOptions, commits *repository.PushCommits) {
ctx, _, finished := process.GetManager().AddContext(graceful.GetManager().HammerContext(), fmt.Sprintf("botsNofiter.NotifyPushCommits User: %s[%d] in %s[%d]", pusher.Name, pusher.ID, repo.FullName(), repo.ID))
defer finished()
apiPusher := convert.ToUser(pusher, nil)
apiCommits, apiHeadCommit, err := commits.ToAPIPayloadCommits(ctx, repo.RepoPath(), repo.HTMLURL())
if err != nil {
@@ -419,7 +416,7 @@ func (a *botsNotifier) NotifyPushCommits(ctx context.Context, pusher *user_model
return
}
notify(repo, pusher, opts.RefFullName, webhook.HookEventPush, &api.PushPayload{
if err := notify(repo, pusher, opts.RefFullName, webhook.HookEventPush, &api.PushPayload{
Ref: opts.RefFullName,
Before: opts.OldCommitID,
After: opts.NewCommitID,
@@ -429,7 +426,9 @@ func (a *botsNotifier) NotifyPushCommits(ctx context.Context, pusher *user_model
Repo: convert.ToRepo(repo, perm.AccessModeOwner),
Pusher: apiPusher,
Sender: apiPusher,
})
}); err != nil {
log.Error("PrepareWebhooks: %v", err)
}
}
func (a *botsNotifier) NotifyCreateRef(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, refType, refFullName, refID string) {

View File

@@ -74,7 +74,6 @@ import (
bots_model "code.gitea.io/gitea/models/bots"
"code.gitea.io/gitea/models/organization"
"code.gitea.io/gitea/models/perm"
perm_model "code.gitea.io/gitea/models/perm"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
@@ -200,9 +199,9 @@ func repoAssignment() func(ctx *context.APIContext) {
}
if task.IsForkPullRequest {
ctx.Repo.Permission.AccessMode = perm_model.AccessModeRead
ctx.Repo.Permission.AccessMode = perm.AccessModeRead
} else {
ctx.Repo.Permission.AccessMode = perm_model.AccessModeWrite
ctx.Repo.Permission.AccessMode = perm.AccessModeWrite
}
if err := ctx.Repo.Repository.LoadUnits(ctx); err != nil {
@@ -210,7 +209,7 @@
return
}
ctx.Repo.Permission.Units = ctx.Repo.Repository.Units
ctx.Repo.Permission.UnitsMode = make(map[unit.Type]perm_model.AccessMode)
ctx.Repo.Permission.UnitsMode = make(map[unit.Type]perm.AccessMode)
for _, u := range ctx.Repo.Repository.Units {
ctx.Repo.Permission.UnitsMode[u.Type] = ctx.Repo.Permission.AccessMode
}

View File

@@ -42,11 +42,9 @@ func RunnersList(ctx *context.Context, tplName base.TplName, opts bots_model.Fin
ctx.ServerError("CreateRunnerToken", err)
return
}
} else {
if err != nil {
ctx.ServerError("GetUnactivatedRunnerToken", err)
return
}
} else if err != nil {
ctx.ServerError("GetUnactivatedRunnerToken", err)
return
}
ctx.Data["Keyword"] = opts.Filter

View File

@@ -18,7 +18,6 @@ import (
const (
tplRunners base.TplName = "admin/runners/base"
tplRunnerNew base.TplName = "admin/runners/new"
tplRunnerEdit base.TplName = "admin/runners/edit"
)

View File

@@ -125,67 +125,65 @@ func ViewPost(ctx *context_module.Context) {
},
}
if current != nil {
var task *bots_model.Task
if current.TaskID > 0 {
var err error
task, err = bots_model.GetTaskByID(ctx, current.TaskID)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
task.Job = current
if err := task.LoadAttributes(ctx); err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
var task *bots_model.Task
if current.TaskID > 0 {
var err error
task, err = bots_model.GetTaskByID(ctx, current.TaskID)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
task.Job = current
if err := task.LoadAttributes(ctx); err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
}
resp.StateData.CurrentJobInfo.Title = current.Name
resp.StateData.CurrentJobSteps = make([]ViewJobStep, 0)
resp.LogsData.StreamingLogs = make([]ViewStepLog, 0, len(req.StepLogCursors))
resp.StateData.CurrentJobInfo.Detail = current.Status.String()
if task != nil {
steps := bots.FullSteps(task)
resp.StateData.CurrentJobSteps = make([]ViewJobStep, len(steps))
for i, v := range steps {
resp.StateData.CurrentJobSteps[i] = ViewJobStep{
Summary: v.Name,
Duration: float64(v.TakeTime() / time.Second),
Status: v.Status.String(),
}
}
resp.StateData.CurrentJobInfo.Title = current.Name
resp.StateData.CurrentJobSteps = make([]ViewJobStep, 0)
resp.LogsData.StreamingLogs = make([]ViewStepLog, 0, len(req.StepLogCursors))
resp.StateData.CurrentJobInfo.Detail = current.Status.String()
if task != nil {
steps := bots.FullSteps(task)
resp.StateData.CurrentJobSteps = make([]ViewJobStep, len(steps))
for i, v := range steps {
resp.StateData.CurrentJobSteps[i] = ViewJobStep{
Summary: v.Name,
Duration: float64(v.TakeTime() / time.Second),
Status: v.Status.String(),
}
}
for _, cursor := range req.StepLogCursors {
if cursor.Expanded {
step := steps[cursor.StepIndex]
var logRows []*runnerv1.LogRow
if cursor.Cursor < step.LogLength || step.LogLength < 0 {
index := step.LogIndex + cursor.Cursor
length := step.LogLength - cursor.Cursor
offset := (*task.LogIndexes)[index]
var err error
logRows, err = bots.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
for _, cursor := range req.StepLogCursors {
if cursor.Expanded {
step := steps[cursor.StepIndex]
var logRows []*runnerv1.LogRow
if cursor.Cursor < step.LogLength || step.LogLength < 0 {
index := step.LogIndex + cursor.Cursor
length := step.LogLength - cursor.Cursor
offset := (*task.LogIndexes)[index]
var err error
logRows, err = bots.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
logLines := make([]ViewStepLogLine, len(logRows))
for i, row := range logRows {
logLines[i] = ViewStepLogLine{
Ln: cursor.Cursor + int64(i) + 1, // start at 1
M: row.Content,
T: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
}
}
resp.LogsData.StreamingLogs = append(resp.LogsData.StreamingLogs, ViewStepLog{
StepIndex: cursor.StepIndex,
Cursor: cursor.Cursor + int64(len(logLines)),
Lines: logLines,
})
}
logLines := make([]ViewStepLogLine, len(logRows))
for i, row := range logRows {
logLines[i] = ViewStepLogLine{
Ln: cursor.Cursor + int64(i) + 1, // start at 1
M: row.Content,
T: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
}
}
resp.LogsData.StreamingLogs = append(resp.LogsData.StreamingLogs, ViewStepLog{
StepIndex: cursor.StepIndex,
Cursor: cursor.Cursor + int64(len(logLines)),
Lines: logLines,
})
}
}
}
@@ -262,33 +260,37 @@ func Cancel(ctx *context_module.Context) {
ctx.JSON(http.StatusOK, struct{}{})
}
func getRunJobs(ctx *context_module.Context, runIndex, jobIndex int64) (current *bots_model.RunJob, jobs []*bots_model.RunJob) {
// getRunJobs gets the jobs of runIndex, and returns jobs[jobIndex], jobs.
// Any error will be written to the ctx.
// It never returns a nil job or an empty jobs; if the jobIndex is out of range, it will be treated as 0.
func getRunJobs(ctx *context_module.Context, runIndex, jobIndex int64) (*bots_model.RunJob, []*bots_model.RunJob) {
run, err := bots_model.GetRunByIndex(ctx, ctx.Repo.Repository.ID, runIndex)
if err != nil {
if _, ok := err.(bots_model.ErrRunNotExist); ok {
ctx.Error(http.StatusNotFound, err.Error())
return
return nil, nil
}
ctx.Error(http.StatusInternalServerError, err.Error())
return
return nil, nil
}
run.Repo = ctx.Repo.Repository
jobs, err = bots_model.GetRunJobsByRunID(ctx, run.ID)
jobs, err := bots_model.GetRunJobsByRunID(ctx, run.ID)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return nil, nil
}
if len(jobs) == 0 {
ctx.Error(http.StatusNotFound, err.Error())
return nil, nil
}
for _, v := range jobs {
v.Run = run
}
if jobIndex >= 0 && jobIndex < int64(len(jobs)) {
if len(jobs) == 0 {
ctx.Error(http.StatusNotFound, fmt.Sprintf("run %v has no job %v", runIndex, jobIndex))
return nil, nil
}
current = jobs[jobIndex]
return jobs[jobIndex], jobs
}
return
return jobs[0], jobs
}
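The added doc comment captures the contract change: as long as no error has been written to the ctx, callers always get a non-nil job back, because an out-of-range jobIndex simply falls back to the first job. The selection rule in isolation, with plain ints standing in for *bots_model.RunJob (illustrative only):

package main

import "fmt"

// pick mirrors the tail of getRunJobs: an in-range index is honoured and
// anything else is treated as 0. The caller must guarantee a non-empty
// slice, which getRunJobs does with its len(jobs) == 0 check.
func pick(jobs []int, jobIndex int64) int {
	if jobIndex >= 0 && jobIndex < int64(len(jobs)) {
		return jobs[jobIndex]
	}
	return jobs[0]
}

func main() {
	jobs := []int{101, 102, 103}
	fmt.Println(pick(jobs, 1))  // 102
	fmt.Println(pick(jobs, -5)) // 101: a negative index falls back to the first job
	fmt.Println(pick(jobs, 42)) // 101: past the end falls back as well
}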