Merge branch 'main' into lunny/fix_migrate_issue_bug

Lunny Xiao 2025-02-10 14:09:46 -08:00
commit 573a55d556
226 changed files with 5698 additions and 1911 deletions

View File

@ -22,20 +22,25 @@ groups:
name: FEATURES
labels:
- type/feature
-
name: API
labels:
- modifies/api
-
name: ENHANCEMENTS
labels:
- type/enhancement
- type/refactoring
- topic/ui
-
name: PERFORMANCE
labels:
- performance/memory
- performance/speed
- performance/bigrepo
- performance/cpu
-
name: BUGFIXES
labels:
- type/bug
-
name: API
labels:
- modifies/api
-
name: TESTING
labels:

View File

@ -1,8 +1,8 @@
name: cron-licenses
on:
schedule:
- cron: "7 0 * * 1" # every Monday at 00:07 UTC
#schedule:
# - cron: "7 0 * * 1" # every Monday at 00:07 UTC
workflow_dispatch:
jobs:

View File

@ -63,3 +63,4 @@ Kemal Zebari <kemalzebra@gmail.com> (@kemzeb)
Rowan Bohde <rowan.bohde@gmail.com> (@bohde)
hiifong <i@hiif.ong> (@hiifong)
metiftikci <metiftikci@hotmail.com> (@metiftikci)
Christopher Homberger <christopher.homberger@web.de> (@ChristopherHX)

View File

@ -31,6 +31,11 @@ var microcmdUserCreate = &cli.Command{
Name: "username",
Usage: "Username",
},
&cli.StringFlag{
Name: "user-type",
Usage: "Set user's type: individual or bot",
Value: "individual",
},
&cli.StringFlag{
Name: "password",
Usage: "User password",
@ -77,6 +82,22 @@ func runCreateUser(c *cli.Context) error {
return err
}
userTypes := map[string]user_model.UserType{
"individual": user_model.UserTypeIndividual,
"bot": user_model.UserTypeBot,
}
userType, ok := userTypes[c.String("user-type")]
if !ok {
return fmt.Errorf("invalid user type: %s", c.String("user-type"))
}
if userType != user_model.UserTypeIndividual {
// Some other commands like "change-password" also only support individual users.
// The "password" behavior for bot users needs to be clarified in the future.
// At the moment, we do not allow setting a password for bot users.
if c.IsSet("password") || c.IsSet("random-password") {
return errors.New("password can only be set for individual users")
}
}
if c.IsSet("name") && c.IsSet("username") {
return errors.New("cannot set both --name and --username flags")
}
@ -118,16 +139,19 @@ func runCreateUser(c *cli.Context) error {
return err
}
fmt.Printf("generated random password is '%s'\n", password)
} else {
} else if userType == user_model.UserTypeIndividual {
return errors.New("must set either password or random-password flag")
}
isAdmin := c.Bool("admin")
mustChangePassword := true // always default to true
if c.IsSet("must-change-password") {
if userType != user_model.UserTypeIndividual {
return errors.New("must-change-password flag can only be set for individual users")
}
// if the flag is set, use the value provided by the user
mustChangePassword = c.Bool("must-change-password")
} else {
} else if userType == user_model.UserTypeIndividual {
// check whether there are users in the database
hasUserRecord, err := db.IsTableNotEmpty(&user_model.User{})
if err != nil {
@ -151,8 +175,9 @@ func runCreateUser(c *cli.Context) error {
u := &user_model.User{
Name: username,
Email: c.String("email"),
Passwd: password,
IsAdmin: isAdmin,
Type: userType,
Passwd: password,
MustChangePassword: mustChangePassword,
Visibility: visibility,
}

View File

@ -13,32 +13,54 @@ import (
user_model "code.gitea.io/gitea/models/user"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAdminUserCreate(t *testing.T) {
app := NewMainApp(AppVersion{})
reset := func() {
assert.NoError(t, db.TruncateBeans(db.DefaultContext, &user_model.User{}))
assert.NoError(t, db.TruncateBeans(db.DefaultContext, &user_model.EmailAddress{}))
require.NoError(t, db.TruncateBeans(db.DefaultContext, &user_model.User{}))
require.NoError(t, db.TruncateBeans(db.DefaultContext, &user_model.EmailAddress{}))
}
type createCheck struct{ IsAdmin, MustChangePassword bool }
createUser := func(name, args string) createCheck {
assert.NoError(t, app.Run(strings.Fields(fmt.Sprintf("./gitea admin user create --username %s --email %s@gitea.local %s --password foobar", name, name, args))))
u := unittest.AssertExistsAndLoadBean(t, &user_model.User{LowerName: name})
return createCheck{u.IsAdmin, u.MustChangePassword}
}
reset()
assert.Equal(t, createCheck{IsAdmin: false, MustChangePassword: false}, createUser("u", ""), "first non-admin user doesn't need to change password")
t.Run("MustChangePassword", func(t *testing.T) {
type check struct {
IsAdmin bool
MustChangePassword bool
}
createCheck := func(name, args string) check {
require.NoError(t, app.Run(strings.Fields(fmt.Sprintf("./gitea admin user create --username %s --email %s@gitea.local %s --password foobar", name, name, args))))
u := unittest.AssertExistsAndLoadBean(t, &user_model.User{LowerName: name})
return check{IsAdmin: u.IsAdmin, MustChangePassword: u.MustChangePassword}
}
reset()
assert.Equal(t, check{IsAdmin: false, MustChangePassword: false}, createCheck("u", ""), "first non-admin user doesn't need to change password")
reset()
assert.Equal(t, createCheck{IsAdmin: true, MustChangePassword: false}, createUser("u", "--admin"), "first admin user doesn't need to change password")
reset()
assert.Equal(t, check{IsAdmin: true, MustChangePassword: false}, createCheck("u", "--admin"), "first admin user doesn't need to change password")
reset()
assert.Equal(t, createCheck{IsAdmin: true, MustChangePassword: true}, createUser("u", "--admin --must-change-password"))
assert.Equal(t, createCheck{IsAdmin: true, MustChangePassword: true}, createUser("u2", "--admin"))
assert.Equal(t, createCheck{IsAdmin: true, MustChangePassword: false}, createUser("u3", "--admin --must-change-password=false"))
assert.Equal(t, createCheck{IsAdmin: false, MustChangePassword: true}, createUser("u4", ""))
assert.Equal(t, createCheck{IsAdmin: false, MustChangePassword: false}, createUser("u5", "--must-change-password=false"))
reset()
assert.Equal(t, check{IsAdmin: true, MustChangePassword: true}, createCheck("u", "--admin --must-change-password"))
assert.Equal(t, check{IsAdmin: true, MustChangePassword: true}, createCheck("u2", "--admin"))
assert.Equal(t, check{IsAdmin: true, MustChangePassword: false}, createCheck("u3", "--admin --must-change-password=false"))
assert.Equal(t, check{IsAdmin: false, MustChangePassword: true}, createCheck("u4", ""))
assert.Equal(t, check{IsAdmin: false, MustChangePassword: false}, createCheck("u5", "--must-change-password=false"))
})
t.Run("UserType", func(t *testing.T) {
createUser := func(name, args string) error {
return app.Run(strings.Fields(fmt.Sprintf("./gitea admin user create --username %s --email %s@gitea.local %s", name, name, args)))
}
reset()
assert.ErrorContains(t, createUser("u", "--user-type invalid"), "invalid user type")
assert.ErrorContains(t, createUser("u", "--user-type bot --password 123"), "can only be set for individual users")
assert.ErrorContains(t, createUser("u", "--user-type bot --must-change-password"), "can only be set for individual users")
assert.NoError(t, createUser("u", "--user-type bot"))
u := unittest.AssertExistsAndLoadBean(t, &user_model.User{LowerName: "u"})
assert.Equal(t, user_model.UserTypeBot, u.Type)
assert.Equal(t, "", u.Passwd)
})
}

main_timezones.go (new file, 16 lines added)
View File

@ -0,0 +1,16 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
//go:build windows
package main
// Go can load the OS's timezone data on most UNIX systems (https://github.com/golang/go/blob/master/src/time/zoneinfo_unix.go)
// Even if the timezone data is missing, users can install the related packages to get it.
// But on Windows, although `zoneinfo_windows.go` tries to load the timezone data from the Windows registry,
// some users still run into missing timezone data: https://github.com/go-gitea/gitea/issues/33235
// So we import the tzdata package to make sure the timezone data is included in the binary.
//
// Builders of non-Windows packages can still use "TAGS=timetzdata" to include the tzdata package in the binary.
// If we decide to add tzdata for other platforms, modify the "go:build" directive above.
import _ "time/tzdata"

View File

@ -58,6 +58,7 @@ func InsertVariable(ctx context.Context, ownerID, repoID int64, name, data strin
type FindVariablesOpts struct {
db.ListOptions
IDs []int64
RepoID int64
OwnerID int64 // it will be ignored if RepoID is set
Name string
@ -65,6 +66,15 @@ type FindVariablesOpts struct {
func (opts FindVariablesOpts) ToConds() builder.Cond {
cond := builder.NewCond()
if len(opts.IDs) > 0 {
if len(opts.IDs) == 1 {
cond = cond.And(builder.Eq{"id": opts.IDs[0]})
} else {
cond = cond.And(builder.In("id", opts.IDs))
}
}
// Since we now support instance-level variables,
// there is no need to check for null values for `owner_id` and `repo_id`
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@ -85,12 +95,12 @@ func FindVariables(ctx context.Context, opts FindVariablesOpts) ([]*ActionVariab
return db.Find[ActionVariable](ctx, opts)
}
func UpdateVariable(ctx context.Context, variable *ActionVariable) (bool, error) {
count, err := db.GetEngine(ctx).ID(variable.ID).Cols("name", "data").
Update(&ActionVariable{
Name: variable.Name,
Data: variable.Data,
})
func UpdateVariableCols(ctx context.Context, variable *ActionVariable, cols ...string) (bool, error) {
variable.Name = strings.ToUpper(variable.Name)
count, err := db.GetEngine(ctx).
ID(variable.ID).
Cols(cols...).
Update(variable)
return count != 0, err
}
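
For context, a hedged caller sketch (not part of this commit; the import path and surrounding service wiring are assumptions) showing how UpdateVariableCols writes only the listed columns:

// Hypothetical caller, not part of this commit.
package example

import (
	"context"
	"fmt"

	actions_model "code.gitea.io/gitea/models/actions" // assumed import path for the file above
)

// renameVariable updates name and data in one call; only the listed columns are
// written because UpdateVariableCols passes them straight to xorm's Cols().
func renameVariable(ctx context.Context, v *actions_model.ActionVariable, newName, newData string) error {
	v.Name = newName // UpdateVariableCols upper-cases the name before writing
	v.Data = newData
	ok, err := actions_model.UpdateVariableCols(ctx, v, "name", "data")
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("variable %d not found", v.ID)
	}
	return nil
}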

View File

@ -17,6 +17,7 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/optional"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
@ -501,6 +502,45 @@ func GetIssueByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
return issue, nil
}
func isPullToCond(isPull optional.Option[bool]) builder.Cond {
if isPull.Has() {
return builder.Eq{"is_pull": isPull.Value()}
}
return builder.NewCond()
}
func FindLatestUpdatedIssues(ctx context.Context, repoID int64, isPull optional.Option[bool], pageSize int) (IssueList, error) {
issues := make([]*Issue, 0, pageSize)
err := db.GetEngine(ctx).Where("repo_id = ?", repoID).
And(isPullToCond(isPull)).
OrderBy("updated_unix DESC").
Limit(pageSize).
Find(&issues)
return issues, err
}
func FindIssuesSuggestionByKeyword(ctx context.Context, repoID int64, keyword string, isPull optional.Option[bool], excludedID int64, pageSize int) (IssueList, error) {
cond := builder.NewCond()
if excludedID > 0 {
cond = cond.And(builder.Neq{"`id`": excludedID})
}
// It seems that GitHub searches both title and content (maybe ranked by its search engine?)
// The first PR (https://github.com/go-gitea/gitea/pull/32327) used the "search indexer" to search "name(title) + content",
// but searching "content" (especially with a LIKE query in the DB engine) produces worse (unusable) results.
// So now (https://github.com/go-gitea/gitea/pull/33538) it only searches "name(title)", leaving further improvements for the future.
cond = cond.And(db.BuildCaseInsensitiveLike("`name`", keyword))
issues := make([]*Issue, 0, pageSize)
err := db.GetEngine(ctx).Where("repo_id = ?", repoID).
And(isPullToCond(isPull)).
And(cond).
OrderBy("updated_unix DESC, `index` DESC").
Limit(pageSize).
Find(&issues)
return issues, err
}
// GetIssueWithAttrsByIndex returns issue by index in a repository.
func GetIssueWithAttrsByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
issue, err := GetIssueByIndex(ctx, repoID, index)
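
For context, a hedged caller sketch (not part of this commit; the import paths are assumptions) of how the two suggestion queries added above might be combined, falling back to the latest updated issues when no keyword is given:

// Hypothetical caller, not part of this commit.
package example

import (
	"context"

	issues_model "code.gitea.io/gitea/models/issues" // assumed import path
	"code.gitea.io/gitea/modules/optional"
)

// suggestIssues returns recent issues when no keyword is given, otherwise a
// case-insensitive title match that skips the issue currently being edited.
func suggestIssues(ctx context.Context, repoID, excludedID int64, keyword string) (issues_model.IssueList, error) {
	const pageSize = 5
	if keyword == "" {
		return issues_model.FindLatestUpdatedIssues(ctx, repoID, optional.None[bool](), pageSize)
	}
	return issues_model.FindIssuesSuggestionByKeyword(ctx, repoID, keyword, optional.None[bool](), excludedID, pageSize)
}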

View File

@ -38,13 +38,15 @@ func (issue *Issue) projectID(ctx context.Context) int64 {
}
// ProjectColumnID returns the project column ID if the issue was assigned to one
func (issue *Issue) ProjectColumnID(ctx context.Context) int64 {
func (issue *Issue) ProjectColumnID(ctx context.Context) (int64, error) {
var ip project_model.ProjectIssue
has, err := db.GetEngine(ctx).Where("issue_id=?", issue.ID).Get(&ip)
if err != nil || !has {
return 0
if err != nil {
return 0, err
} else if !has {
return 0, nil
}
return ip.ProjectColumnID
return ip.ProjectColumnID, nil
}
// LoadIssuesFromColumn load issues assigned to this column

View File

@ -107,7 +107,7 @@ func GetIssueStats(ctx context.Context, opts *IssuesOptions) (*IssueStats, error
accum.YourRepositoriesCount += stats.YourRepositoriesCount
accum.AssignCount += stats.AssignCount
accum.CreateCount += stats.CreateCount
accum.OpenCount += stats.MentionCount
accum.MentionCount += stats.MentionCount
accum.ReviewRequestedCount += stats.ReviewRequestedCount
accum.ReviewedCount += stats.ReviewedCount
i = chunk

View File

@ -930,17 +930,19 @@ func MarkConversation(ctx context.Context, comment *Comment, doer *user_model.Us
}
// CanMarkConversation is the permission check for adding or removing a conversation mark on a code comment
// the PR writer , offfcial reviewer and poster can do it
// the PR writer , official reviewer and poster can do it
func CanMarkConversation(ctx context.Context, issue *Issue, doer *user_model.User) (permResult bool, err error) {
if doer == nil || issue == nil {
return false, fmt.Errorf("issue or doer is nil")
}
if err = issue.LoadRepo(ctx); err != nil {
return false, err
}
if issue.Repo.IsArchived {
return false, nil
}
if doer.ID != issue.PosterID {
if err = issue.LoadRepo(ctx); err != nil {
return false, err
}
p, err := access_model.GetUserRepoPermission(ctx, issue.Repo, doer)
if err != nil {
return false, err

View File

@ -0,0 +1,103 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package organization
import (
"sort"
"code.gitea.io/gitea/models/db"
"xorm.io/builder"
)
type WorktimeSumByRepos struct {
RepoName string
SumTime int64
}
func GetWorktimeByRepos(org *Organization, unitFrom, unixTo int64) (results []WorktimeSumByRepos, err error) {
err = db.GetEngine(db.DefaultContext).
Select("repository.name AS repo_name, SUM(tracked_time.time) AS sum_time").
Table("tracked_time").
Join("INNER", "issue", "tracked_time.issue_id = issue.id").
Join("INNER", "repository", "issue.repo_id = repository.id").
Where(builder.Eq{"repository.owner_id": org.ID}).
And(builder.Eq{"tracked_time.deleted": false}).
And(builder.Gte{"tracked_time.created_unix": unitFrom}).
And(builder.Lte{"tracked_time.created_unix": unixTo}).
GroupBy("repository.name").
OrderBy("repository.name").
Find(&results)
return results, err
}
type WorktimeSumByMilestones struct {
RepoName string
MilestoneName string
MilestoneID int64
MilestoneDeadline int64
SumTime int64
HideRepoName bool
}
func GetWorktimeByMilestones(org *Organization, unitFrom, unixTo int64) (results []WorktimeSumByMilestones, err error) {
err = db.GetEngine(db.DefaultContext).
Select("repository.name AS repo_name, milestone.name AS milestone_name, milestone.id AS milestone_id, milestone.deadline_unix as milestone_deadline, SUM(tracked_time.time) AS sum_time").
Table("tracked_time").
Join("INNER", "issue", "tracked_time.issue_id = issue.id").
Join("INNER", "repository", "issue.repo_id = repository.id").
Join("LEFT", "milestone", "issue.milestone_id = milestone.id").
Where(builder.Eq{"repository.owner_id": org.ID}).
And(builder.Eq{"tracked_time.deleted": false}).
And(builder.Gte{"tracked_time.created_unix": unitFrom}).
And(builder.Lte{"tracked_time.created_unix": unixTo}).
GroupBy("repository.name, milestone.name, milestone.deadline_unix, milestone.id").
OrderBy("repository.name, milestone.deadline_unix, milestone.id").
Find(&results)
// TODO: pgsql: NULL values are sorted last in default ascending order, so we need to sort them manually again.
sort.Slice(results, func(i, j int) bool {
if results[i].RepoName != results[j].RepoName {
return results[i].RepoName < results[j].RepoName
}
if results[i].MilestoneDeadline != results[j].MilestoneDeadline {
return results[i].MilestoneDeadline < results[j].MilestoneDeadline
}
return results[i].MilestoneID < results[j].MilestoneID
})
// Show only the first RepoName, for nicer output.
prevRepoName := ""
for i := 0; i < len(results); i++ {
res := &results[i]
res.MilestoneDeadline = 0 // clear the deadline because we do not really need it
if prevRepoName == res.RepoName {
res.HideRepoName = true
}
prevRepoName = res.RepoName
}
return results, err
}
type WorktimeSumByMembers struct {
UserName string
SumTime int64
}
func GetWorktimeByMembers(org *Organization, unitFrom, unixTo int64) (results []WorktimeSumByMembers, err error) {
err = db.GetEngine(db.DefaultContext).
Select("`user`.name AS user_name, SUM(tracked_time.time) AS sum_time").
Table("tracked_time").
Join("INNER", "issue", "tracked_time.issue_id = issue.id").
Join("INNER", "repository", "issue.repo_id = repository.id").
Join("INNER", "`user`", "tracked_time.user_id = `user`.id").
Where(builder.Eq{"repository.owner_id": org.ID}).
And(builder.Eq{"tracked_time.deleted": false}).
And(builder.Gte{"tracked_time.created_unix": unitFrom}).
And(builder.Lte{"tracked_time.created_unix": unixTo}).
GroupBy("`user`.name").
OrderBy("sum_time DESC").
Find(&results)
return results, err
}
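
For context, a hedged caller sketch (not part of this commit; the import path and the reporting loop are assumptions) of the intended call pattern for the new worktime aggregations: one unix-time range, one query per aggregation.

// Hypothetical caller, not part of this commit.
package example

import (
	"fmt"
	"time"

	org_model "code.gitea.io/gitea/models/organization" // assumed import path for the file above
)

func printWorktimeReport(org *org_model.Organization) error {
	to := time.Now().Unix()
	from := to - 7*24*3600 // last 7 days

	byRepo, err := org_model.GetWorktimeByRepos(org, from, to)
	if err != nil {
		return err
	}
	for _, r := range byRepo {
		fmt.Printf("%s: %d seconds tracked\n", r.RepoName, r.SumTime)
	}

	byMember, err := org_model.GetWorktimeByMembers(org, from, to)
	if err != nil {
		return err
	}
	for _, m := range byMember {
		fmt.Printf("%s: %d seconds tracked\n", m.UserName, m.SumTime)
	}
	return nil
}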

View File

@ -244,6 +244,10 @@ func GetSearchOrderByBySortType(sortType string) db.SearchOrderBy {
return db.SearchOrderByRecentUpdated
case "leastupdate":
return db.SearchOrderByLeastUpdated
case "alphabetically":
return "title ASC"
case "reversealphabetically":
return "title DESC"
default:
return db.SearchOrderByNewest
}

View File

@ -45,8 +45,6 @@ func TestCreateRepositoryNotice(t *testing.T) {
unittest.AssertExistsAndLoadBean(t, noticeBean)
}
// TODO TestRemoveAllWithNotice
func TestCountNotices(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
assert.Equal(t, int64(3), system.CountNotices(db.DefaultContext))

View File

@ -385,11 +385,12 @@ func (u *User) ValidatePassword(passwd string) bool {
}
// IsPasswordSet checks if the password is set or left empty
// TODO: It's better to clarify the "password" behavior for different types (individual, bot)
func (u *User) IsPasswordSet() bool {
return len(u.Passwd) != 0
return u.Passwd != ""
}
// IsOrganization returns true if user is actually a organization.
// IsOrganization returns true if user is actually an organization.
func (u *User) IsOrganization() bool {
return u.Type == UserTypeOrganization
}
@ -399,13 +400,14 @@ func (u *User) IsIndividual() bool {
return u.Type == UserTypeIndividual
}
func (u *User) IsUser() bool {
return u.Type == UserTypeIndividual || u.Type == UserTypeBot
// IsTypeBot returns whether the user is of type bot
func (u *User) IsTypeBot() bool {
return u.Type == UserTypeBot
}
// IsBot returns whether or not the user is of type bot
func (u *User) IsBot() bool {
return u.Type == UserTypeBot
// IsTokenAccessAllowed returns whether the user is an individual or a bot (which allows for token access)
func (u *User) IsTokenAccessAllowed() bool {
return u.Type == UserTypeIndividual || u.Type == UserTypeBot
}
// DisplayName returns full name if it's not empty,

View File

@ -56,7 +56,7 @@ func NewActionsUser() *User {
Email: ActionsUserEmail,
KeepEmailPrivate: true,
LoginName: ActionsUserName,
Type: UserTypeIndividual,
Type: UserTypeBot,
AllowCreateOrganization: true,
Visibility: structs.VisibleTypePublic,
}

View File

@ -18,7 +18,6 @@ import (
"time"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
@ -64,10 +63,7 @@ func VerifyTimeLimitCode(now time.Time, data string, minutes int, code string) b
// check code
retCode := CreateTimeLimitCode(data, aliveTime, startTimeStr, nil)
if subtle.ConstantTimeCompare([]byte(retCode), []byte(code)) != 1 {
retCode = CreateTimeLimitCode(data, aliveTime, startTimeStr, sha1.New()) // TODO: this is only for the support of legacy codes, remove this in/after 1.23
if subtle.ConstantTimeCompare([]byte(retCode), []byte(code)) != 1 {
return false
}
return false
}
// check time is expired or not: startTime <= now && now < startTime + minutes
@ -144,13 +140,12 @@ func Int64sToStrings(ints []int64) []string {
return strs
}
// EntryIcon returns the octicon class for displaying files/directories
// EntryIcon returns the octicon name for displaying files/directories
func EntryIcon(entry *git.TreeEntry) string {
switch {
case entry.IsLink():
te, err := entry.FollowLink()
if err != nil {
log.Debug(err.Error())
return "file-symlink-file"
}
if te.IsDir() {

View File

@ -86,13 +86,10 @@ JWT_SECRET = %s
verifyDataCode := func(c string) bool {
return VerifyTimeLimitCode(now, "data", 2, c)
}
code1 := CreateTimeLimitCode("data", 2, now, sha1.New())
code2 := CreateTimeLimitCode("data", 2, now, nil)
assert.True(t, verifyDataCode(code1))
assert.True(t, verifyDataCode(code2))
code := CreateTimeLimitCode("data", 2, now, nil)
assert.True(t, verifyDataCode(code))
initGeneralSecret("000_QLUd4fYVyxetjxC4eZkrBgWM2SndOOWDNtgUUko")
assert.False(t, verifyDataCode(code1))
assert.False(t, verifyDataCode(code2))
assert.False(t, verifyDataCode(code))
})
}
@ -137,5 +134,3 @@ func TestInt64sToStrings(t *testing.T) {
Int64sToStrings([]int64{1, 4, 16, 64, 256}),
)
}
// TODO: Test EntryIcon

View File

@ -46,19 +46,9 @@ func parseLsTreeLine(line []byte) (*LsTreeEntry, error) {
entry.Size = optional.Some(size)
}
switch string(entryMode) {
case "100644":
entry.EntryMode = EntryModeBlob
case "100755":
entry.EntryMode = EntryModeExec
case "120000":
entry.EntryMode = EntryModeSymlink
case "160000":
entry.EntryMode = EntryModeCommit
case "040000", "040755": // git uses 040000 for tree object, but some users may get 040755 for unknown reasons
entry.EntryMode = EntryModeTree
default:
return nil, fmt.Errorf("unknown type: %v", string(entryMode))
entry.EntryMode, err = ParseEntryMode(string(entryMode))
if err != nil || entry.EntryMode == EntryModeNoEntry {
return nil, fmt.Errorf("invalid ls-tree output (invalid mode): %q, err: %w", line, err)
}
entry.ID, err = NewIDFromString(string(entryObjectID))

View File

@ -3,7 +3,10 @@
package git
import "strconv"
import (
"fmt"
"strconv"
)
// EntryMode the type of the object in the git tree
type EntryMode int
@ -11,6 +14,9 @@ type EntryMode int
// There are only a few file modes in Git. They look like unix file modes, but they can only be
// one of these.
const (
// EntryModeNoEntry is possible if the file was added or removed in a commit. In the case of
// added the base commit will not have the file in its tree so a mode of 0o000000 is used.
EntryModeNoEntry EntryMode = 0o000000
// EntryModeBlob
EntryModeBlob EntryMode = 0o100644
// EntryModeExec
@ -33,3 +39,22 @@ func ToEntryMode(value string) EntryMode {
v, _ := strconv.ParseInt(value, 8, 32)
return EntryMode(v)
}
func ParseEntryMode(mode string) (EntryMode, error) {
switch mode {
case "000000":
return EntryModeNoEntry, nil
case "100644":
return EntryModeBlob, nil
case "100755":
return EntryModeExec, nil
case "120000":
return EntryModeSymlink, nil
case "160000":
return EntryModeCommit, nil
case "040000", "040755": // git uses 040000 for tree object, but some users may get 040755 for unknown reasons
return EntryModeTree, nil
default:
return 0, fmt.Errorf("unparsable entry mode: %s", mode)
}
}

View File

@ -99,10 +99,10 @@ func (r *Request) Param(key, value string) *Request {
return r
}
// Body adds request raw body.
// it supports string and []byte.
// Body adds request raw body. It supports string, []byte and io.Reader as body.
func (r *Request) Body(data any) *Request {
switch t := data.(type) {
case nil: // do nothing
case string:
bf := bytes.NewBufferString(t)
r.req.Body = io.NopCloser(bf)
@ -111,6 +111,12 @@ func (r *Request) Body(data any) *Request {
bf := bytes.NewBuffer(t)
r.req.Body = io.NopCloser(bf)
r.req.ContentLength = int64(len(t))
case io.ReadCloser:
r.req.Body = t
case io.Reader:
r.req.Body = io.NopCloser(t)
default:
panic(fmt.Sprintf("unsupported request body type %T", t))
}
return r
}
@ -141,7 +147,7 @@ func (r *Request) getResponse() (*http.Response, error) {
}
} else if r.req.Method == "POST" && r.req.Body == nil && len(paramBody) > 0 {
r.Header("Content-Type", "application/x-www-form-urlencoded")
r.Body(paramBody)
r.Body(paramBody) // string
}
var err error
@ -185,6 +191,7 @@ func (r *Request) getResponse() (*http.Response, error) {
}
// Response executes the request and returns the raw response for the caller to handle manually.
// Caller MUST close the response body if no error occurs
func (r *Request) Response() (*http.Response, error) {
return r.getResponse()
}
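
For context, a hedged usage sketch (not part of this commit; the upload scenario is an assumption) of the extended Body support: *os.File implements io.ReadCloser, so the switch uses it as the body directly, a plain io.Reader would be wrapped with io.NopCloser, and no ContentLength is set for readers. The caller closes the response body as the doc comment requires.

// Hypothetical caller, not part of this commit.
package example

import (
	"io"
	"os"

	"code.gitea.io/gitea/modules/httplib" // assumed import path for the file above
)

// uploadFile streams a file as the request body without buffering it in memory.
func uploadFile(url, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	resp, err := httplib.NewRequest(url, "PUT").Body(f).Response()
	if err != nil {
		return err
	}
	defer resp.Body.Close() // caller MUST close the body on success
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}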

View File

@ -92,6 +92,11 @@ func getIssueIndexerData(ctx context.Context, issueID int64) (*internal.IndexerD
projectID = issue.Project.ID
}
projectColumnID, err := issue.ProjectColumnID(ctx)
if err != nil {
return nil, false, err
}
return &internal.IndexerData{
ID: issue.ID,
RepoID: issue.RepoID,
@ -106,7 +111,7 @@ func getIssueIndexerData(ctx context.Context, issueID int64) (*internal.IndexerD
NoLabel: len(labels) == 0,
MilestoneID: issue.MilestoneID,
ProjectID: projectID,
ProjectColumnID: issue.ProjectColumnID(ctx),
ProjectColumnID: projectColumnID,
PosterID: issue.PosterID,
AssigneeID: issue.AssigneeID,
MentionIDs: mentionIDs,

View File

@ -72,10 +72,14 @@ func (c *HTTPClient) batch(ctx context.Context, operation string, objects []Poin
url := fmt.Sprintf("%s/objects/batch", c.endpoint)
// Original: In some lfs server implementations, they require the ref attribute. #32838
// `ref` is an "optional object describing the server ref that the objects belong to"
// but some (incorrect) lfs servers require it, so maybe adding an empty ref here doesn't break the correct ones.
// but some (incorrect) lfs servers like aliyun require it, so maybe adding an empty ref here doesn't break the correct ones.
// https://github.com/git-lfs/git-lfs/blob/a32a02b44bf8a511aa14f047627c49e1a7fd5021/docs/api/batch.md?plain=1#L37
request := &BatchRequest{operation, c.transferNames(), &Reference{}, objects}
//
// UPDATE: it can't use "empty ref" here because it breaks others like https://github.com/go-gitea/gitea/issues/33453
request := &BatchRequest{operation, c.transferNames(), nil, objects}
payload := new(bytes.Buffer)
err := json.NewEncoder(payload).Encode(request)
if err != nil {

View File

@ -4,7 +4,6 @@
package backend
import (
"bytes"
"context"
"encoding/base64"
"fmt"
@ -29,7 +28,7 @@ var Capabilities = []string{
"locking",
}
var _ transfer.Backend = &GiteaBackend{}
var _ transfer.Backend = (*GiteaBackend)(nil)
// GiteaBackend is an adapter between git-lfs-transfer library and Gitea's internal LFS API
type GiteaBackend struct {
@ -78,17 +77,17 @@ func (g *GiteaBackend) Batch(_ string, pointers []transfer.BatchItem, args trans
headerAccept: mimeGitLFS,
headerContentType: mimeGitLFS,
}
req := newInternalRequest(g.ctx, url, http.MethodPost, headers, bodyBytes)
req := newInternalRequestLFS(g.ctx, url, http.MethodPost, headers, bodyBytes)
resp, err := req.Response()
if err != nil {
g.logger.Log("http request error", err)
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
g.logger.Log("http statuscode error", resp.StatusCode, statusCodeToErr(resp.StatusCode))
return nil, statusCodeToErr(resp.StatusCode)
}
defer resp.Body.Close()
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
g.logger.Log("http read error", err)
@ -158,8 +157,7 @@ func (g *GiteaBackend) Batch(_ string, pointers []transfer.BatchItem, args trans
return pointers, nil
}
// Download implements transfer.Backend. The returned reader must be closed by the
// caller.
// Download implements transfer.Backend. The returned reader must be closed by the caller.
func (g *GiteaBackend) Download(oid string, args transfer.Args) (io.ReadCloser, int64, error) {
idMapStr, exists := args[argID]
if !exists {
@ -187,25 +185,25 @@ func (g *GiteaBackend) Download(oid string, args transfer.Args) (io.ReadCloser,
headerGiteaInternalAuth: g.internalAuth,
headerAccept: mimeOctetStream,
}
req := newInternalRequest(g.ctx, url, http.MethodGet, headers, nil)
req := newInternalRequestLFS(g.ctx, url, http.MethodGet, headers, nil)
resp, err := req.Response()
if err != nil {
return nil, 0, err
return nil, 0, fmt.Errorf("failed to get response: %w", err)
}
// no need to close the body here by "defer resp.Body.Close()", see below
if resp.StatusCode != http.StatusOK {
return nil, 0, statusCodeToErr(resp.StatusCode)
}
defer resp.Body.Close()
respBytes, err := io.ReadAll(resp.Body)
respSize, err := strconv.ParseInt(resp.Header.Get("X-Gitea-LFS-Content-Length"), 10, 64)
if err != nil {
return nil, 0, err
return nil, 0, fmt.Errorf("failed to parse content length: %w", err)
}
respSize := int64(len(respBytes))
respBuf := io.NopCloser(bytes.NewBuffer(respBytes))
return respBuf, respSize, nil
// transfer.Backend will check io.Closer interface and close this Body reader
return resp.Body, respSize, nil
}
// StartUpload implements transfer.Backend.
// Upload implements transfer.Backend.
func (g *GiteaBackend) Upload(oid string, size int64, r io.Reader, args transfer.Args) error {
idMapStr, exists := args[argID]
if !exists {
@ -234,15 +232,14 @@ func (g *GiteaBackend) Upload(oid string, size int64, r io.Reader, args transfer
headerContentType: mimeOctetStream,
headerContentLength: strconv.FormatInt(size, 10),
}
reqBytes, err := io.ReadAll(r)
if err != nil {
return err
}
req := newInternalRequest(g.ctx, url, http.MethodPut, headers, reqBytes)
req := newInternalRequestLFS(g.ctx, url, http.MethodPut, headers, nil)
req.Body(r)
resp, err := req.Response()
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return statusCodeToErr(resp.StatusCode)
}
@ -284,11 +281,12 @@ func (g *GiteaBackend) Verify(oid string, size int64, args transfer.Args) (trans
headerAccept: mimeGitLFS,
headerContentType: mimeGitLFS,
}
req := newInternalRequest(g.ctx, url, http.MethodPost, headers, bodyBytes)
req := newInternalRequestLFS(g.ctx, url, http.MethodPost, headers, bodyBytes)
resp, err := req.Response()
if err != nil {
return transfer.NewStatus(transfer.StatusInternalServerError), err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return transfer.NewStatus(uint32(resp.StatusCode), http.StatusText(resp.StatusCode)), statusCodeToErr(resp.StatusCode)
}

View File

@ -50,7 +50,7 @@ func (g *giteaLockBackend) Create(path, refname string) (transfer.Lock, error) {
headerAccept: mimeGitLFS,
headerContentType: mimeGitLFS,
}
req := newInternalRequest(g.ctx, url, http.MethodPost, headers, bodyBytes)
req := newInternalRequestLFS(g.ctx, url, http.MethodPost, headers, bodyBytes)
resp, err := req.Response()
if err != nil {
g.logger.Log("http request error", err)
@ -102,7 +102,7 @@ func (g *giteaLockBackend) Unlock(lock transfer.Lock) error {
headerAccept: mimeGitLFS,
headerContentType: mimeGitLFS,
}
req := newInternalRequest(g.ctx, url, http.MethodPost, headers, bodyBytes)
req := newInternalRequestLFS(g.ctx, url, http.MethodPost, headers, bodyBytes)
resp, err := req.Response()
if err != nil {
g.logger.Log("http request error", err)
@ -185,7 +185,7 @@ func (g *giteaLockBackend) queryLocks(v url.Values) ([]transfer.Lock, string, er
headerAccept: mimeGitLFS,
headerContentType: mimeGitLFS,
}
req := newInternalRequest(g.ctx, url, http.MethodGet, headers, nil)
req := newInternalRequestLFS(g.ctx, url, http.MethodGet, headers, nil)
resp, err := req.Response()
if err != nil {
g.logger.Log("http request error", err)

View File

@ -5,15 +5,12 @@ package backend
import (
"context"
"crypto/tls"
"fmt"
"net"
"io"
"net/http"
"time"
"code.gitea.io/gitea/modules/httplib"
"code.gitea.io/gitea/modules/proxyprotocol"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/private"
"github.com/charmbracelet/git-lfs-transfer/transfer"
)
@ -89,53 +86,19 @@ func statusCodeToErr(code int) error {
}
}
func newInternalRequest(ctx context.Context, url, method string, headers map[string]string, body []byte) *httplib.Request {
req := httplib.NewRequest(url, method).
SetContext(ctx).
SetTimeout(10*time.Second, 60*time.Second).
SetTLSClientConfig(&tls.Config{
InsecureSkipVerify: true,
})
if setting.Protocol == setting.HTTPUnix {
req.SetTransport(&http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
var d net.Dialer
conn, err := d.DialContext(ctx, "unix", setting.HTTPAddr)
if err != nil {
return conn, err
}
if setting.LocalUseProxyProtocol {
if err = proxyprotocol.WriteLocalHeader(conn); err != nil {
_ = conn.Close()
return nil, err
}
}
return conn, err
},
})
} else if setting.LocalUseProxyProtocol {
req.SetTransport(&http.Transport{
DialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
var d net.Dialer
conn, err := d.DialContext(ctx, network, address)
if err != nil {
return conn, err
}
if err = proxyprotocol.WriteLocalHeader(conn); err != nil {
_ = conn.Close()
return nil, err
}
return conn, err
},
})
}
func newInternalRequestLFS(ctx context.Context, url, method string, headers map[string]string, body any) *httplib.Request {
req := private.NewInternalRequest(ctx, url, method)
for k, v := range headers {
req.Header(k, v)
}
req.Body(body)
switch body := body.(type) {
case nil: // do nothing
case []byte:
req.Body(body) // []byte
case io.Reader:
req.Body(body) // io.Reader or io.ReadCloser
default:
panic(fmt.Sprintf("unsupported request body type %T", body))
}
return req
}

View File

@ -47,7 +47,7 @@ var globalVars = sync.OnceValue(func() *globalVarsType {
// NOTE: All below regex matching do not perform any extra validation.
// Thus a link is produced even if the linked entity does not exist.
// While fast, this is also incorrect and leads to false positives.
// TODO: fix invalid linking issue
// TODO: fix invalid linking issue (update: stale TODO, what issues? maybe no TODO anymore)
// valid chars in encoded path and parameter: [-+~_%.a-zA-Z0-9/]

View File

@ -12,18 +12,17 @@ import (
// Downloader downloads the site repo information
type Downloader interface {
SetContext(context.Context)
GetRepoInfo() (*Repository, error)
GetTopics() ([]string, error)
GetMilestones() ([]*Milestone, error)
GetReleases() ([]*Release, error)
GetLabels() ([]*Label, error)
GetIssues(page, perPage int) ([]*Issue, bool, error)
GetComments(commentable Commentable) ([]*Comment, bool, error)
GetAllComments(page, perPage int) ([]*Comment, bool, error)
GetRepoInfo(ctx context.Context) (*Repository, error)
GetTopics(ctx context.Context) ([]string, error)
GetMilestones(ctx context.Context) ([]*Milestone, error)
GetReleases(ctx context.Context) ([]*Release, error)
GetLabels(ctx context.Context) ([]*Label, error)
GetIssues(ctx context.Context, page, perPage int) ([]*Issue, bool, error)
GetComments(ctx context.Context, commentable Commentable) ([]*Comment, bool, error)
GetAllComments(ctx context.Context, page, perPage int) ([]*Comment, bool, error)
SupportGetRepoComments() bool
GetPullRequests(page, perPage int) ([]*PullRequest, bool, error)
GetReviews(reviewable Reviewable) ([]*Review, error)
GetPullRequests(ctx context.Context, page, perPage int) ([]*PullRequest, bool, error)
GetReviews(ctx context.Context, reviewable Reviewable) ([]*Review, error)
FormatCloneURL(opts MigrateOptions, remoteAddr string) (string, error)
}

View File

@ -13,56 +13,53 @@ type NullDownloader struct{}
var _ Downloader = &NullDownloader{}
// SetContext set context
func (n NullDownloader) SetContext(_ context.Context) {}
// GetRepoInfo returns the repository information
func (n NullDownloader) GetRepoInfo() (*Repository, error) {
func (n NullDownloader) GetRepoInfo(_ context.Context) (*Repository, error) {
return nil, ErrNotSupported{Entity: "RepoInfo"}
}
// GetTopics return repository topics
func (n NullDownloader) GetTopics() ([]string, error) {
func (n NullDownloader) GetTopics(_ context.Context) ([]string, error) {
return nil, ErrNotSupported{Entity: "Topics"}
}
// GetMilestones returns milestones
func (n NullDownloader) GetMilestones() ([]*Milestone, error) {
func (n NullDownloader) GetMilestones(_ context.Context) ([]*Milestone, error) {
return nil, ErrNotSupported{Entity: "Milestones"}
}
// GetReleases returns releases
func (n NullDownloader) GetReleases() ([]*Release, error) {
func (n NullDownloader) GetReleases(_ context.Context) ([]*Release, error) {
return nil, ErrNotSupported{Entity: "Releases"}
}
// GetLabels returns labels
func (n NullDownloader) GetLabels() ([]*Label, error) {
func (n NullDownloader) GetLabels(_ context.Context) ([]*Label, error) {
return nil, ErrNotSupported{Entity: "Labels"}
}
// GetIssues returns issues according to start and limit
func (n NullDownloader) GetIssues(page, perPage int) ([]*Issue, bool, error) {
func (n NullDownloader) GetIssues(_ context.Context, page, perPage int) ([]*Issue, bool, error) {
return nil, false, ErrNotSupported{Entity: "Issues"}
}
// GetComments returns comments of an issue or PR
func (n NullDownloader) GetComments(commentable Commentable) ([]*Comment, bool, error) {
func (n NullDownloader) GetComments(_ context.Context, commentable Commentable) ([]*Comment, bool, error) {
return nil, false, ErrNotSupported{Entity: "Comments"}
}
// GetAllComments returns paginated comments
func (n NullDownloader) GetAllComments(page, perPage int) ([]*Comment, bool, error) {
func (n NullDownloader) GetAllComments(_ context.Context, page, perPage int) ([]*Comment, bool, error) {
return nil, false, ErrNotSupported{Entity: "AllComments"}
}
// GetPullRequests returns pull requests according to page and perPage
func (n NullDownloader) GetPullRequests(page, perPage int) ([]*PullRequest, bool, error) {
func (n NullDownloader) GetPullRequests(_ context.Context, page, perPage int) ([]*PullRequest, bool, error) {
return nil, false, ErrNotSupported{Entity: "PullRequests"}
}
// GetReviews returns pull request reviews
func (n NullDownloader) GetReviews(reviewable Reviewable) ([]*Review, error) {
func (n NullDownloader) GetReviews(_ context.Context, reviewable Reviewable) ([]*Review, error) {
return nil, ErrNotSupported{Entity: "Reviews"}
}

View File

@ -49,21 +49,15 @@ func (d *RetryDownloader) retry(work func() error) error {
return err
}
// SetContext set context
func (d *RetryDownloader) SetContext(ctx context.Context) {
d.ctx = ctx
d.Downloader.SetContext(ctx)
}
// GetRepoInfo returns the repository information with retry
func (d *RetryDownloader) GetRepoInfo() (*Repository, error) {
func (d *RetryDownloader) GetRepoInfo(ctx context.Context) (*Repository, error) {
var (
repo *Repository
err error
)
err = d.retry(func() error {
repo, err = d.Downloader.GetRepoInfo()
repo, err = d.Downloader.GetRepoInfo(ctx)
return err
})
@ -71,14 +65,14 @@ func (d *RetryDownloader) GetRepoInfo() (*Repository, error) {
}
// GetTopics returns a repository's topics with retry
func (d *RetryDownloader) GetTopics() ([]string, error) {
func (d *RetryDownloader) GetTopics(ctx context.Context) ([]string, error) {
var (
topics []string
err error
)
err = d.retry(func() error {
topics, err = d.Downloader.GetTopics()
topics, err = d.Downloader.GetTopics(ctx)
return err
})
@ -86,14 +80,14 @@ func (d *RetryDownloader) GetTopics() ([]string, error) {
}
// GetMilestones returns a repository's milestones with retry
func (d *RetryDownloader) GetMilestones() ([]*Milestone, error) {
func (d *RetryDownloader) GetMilestones(ctx context.Context) ([]*Milestone, error) {
var (
milestones []*Milestone
err error
)
err = d.retry(func() error {
milestones, err = d.Downloader.GetMilestones()
milestones, err = d.Downloader.GetMilestones(ctx)
return err
})
@ -101,14 +95,14 @@ func (d *RetryDownloader) GetMilestones() ([]*Milestone, error) {
}
// GetReleases returns a repository's releases with retry
func (d *RetryDownloader) GetReleases() ([]*Release, error) {
func (d *RetryDownloader) GetReleases(ctx context.Context) ([]*Release, error) {
var (
releases []*Release
err error
)
err = d.retry(func() error {
releases, err = d.Downloader.GetReleases()
releases, err = d.Downloader.GetReleases(ctx)
return err
})
@ -116,14 +110,14 @@ func (d *RetryDownloader) GetReleases() ([]*Release, error) {
}
// GetLabels returns a repository's labels with retry
func (d *RetryDownloader) GetLabels() ([]*Label, error) {
func (d *RetryDownloader) GetLabels(ctx context.Context) ([]*Label, error) {
var (
labels []*Label
err error
)
err = d.retry(func() error {
labels, err = d.Downloader.GetLabels()
labels, err = d.Downloader.GetLabels(ctx)
return err
})
@ -131,7 +125,7 @@ func (d *RetryDownloader) GetLabels() ([]*Label, error) {
}
// GetIssues returns a repository's issues with retry
func (d *RetryDownloader) GetIssues(page, perPage int) ([]*Issue, bool, error) {
func (d *RetryDownloader) GetIssues(ctx context.Context, page, perPage int) ([]*Issue, bool, error) {
var (
issues []*Issue
isEnd bool
@ -139,7 +133,7 @@ func (d *RetryDownloader) GetIssues(page, perPage int) ([]*Issue, bool, error) {
)
err = d.retry(func() error {
issues, isEnd, err = d.Downloader.GetIssues(page, perPage)
issues, isEnd, err = d.Downloader.GetIssues(ctx, page, perPage)
return err
})
@ -147,7 +141,7 @@ func (d *RetryDownloader) GetIssues(page, perPage int) ([]*Issue, bool, error) {
}
// GetComments returns a repository's comments with retry
func (d *RetryDownloader) GetComments(commentable Commentable) ([]*Comment, bool, error) {
func (d *RetryDownloader) GetComments(ctx context.Context, commentable Commentable) ([]*Comment, bool, error) {
var (
comments []*Comment
isEnd bool
@ -155,7 +149,7 @@ func (d *RetryDownloader) GetComments(commentable Commentable) ([]*Comment, bool
)
err = d.retry(func() error {
comments, isEnd, err = d.Downloader.GetComments(commentable)
comments, isEnd, err = d.Downloader.GetComments(ctx, commentable)
return err
})
@ -163,7 +157,7 @@ func (d *RetryDownloader) GetComments(commentable Commentable) ([]*Comment, bool
}
// GetPullRequests returns a repository's pull requests with retry
func (d *RetryDownloader) GetPullRequests(page, perPage int) ([]*PullRequest, bool, error) {
func (d *RetryDownloader) GetPullRequests(ctx context.Context, page, perPage int) ([]*PullRequest, bool, error) {
var (
prs []*PullRequest
err error
@ -171,7 +165,7 @@ func (d *RetryDownloader) GetPullRequests(page, perPage int) ([]*PullRequest, bo
)
err = d.retry(func() error {
prs, isEnd, err = d.Downloader.GetPullRequests(page, perPage)
prs, isEnd, err = d.Downloader.GetPullRequests(ctx, page, perPage)
return err
})
@ -179,14 +173,13 @@ func (d *RetryDownloader) GetPullRequests(page, perPage int) ([]*PullRequest, bo
}
// GetReviews returns pull request reviews with retry
func (d *RetryDownloader) GetReviews(reviewable Reviewable) ([]*Review, error) {
func (d *RetryDownloader) GetReviews(ctx context.Context, reviewable Reviewable) ([]*Review, error) {
var (
reviews []*Review
err error
)
err = d.retry(func() error {
reviews, err = d.Downloader.GetReviews(reviewable)
reviews, err = d.Downloader.GetReviews(ctx, reviewable)
return err
})

View File

@ -4,20 +4,22 @@
package migration
import "context"
// Uploader uploads all the information of one repository
type Uploader interface {
MaxBatchInsertSize(tp string) int
CreateRepo(repo *Repository, opts MigrateOptions) error
CreateTopics(topic ...string) error
CreateMilestones(milestones ...*Milestone) error
CreateReleases(releases ...*Release) error
SyncTags() error
CreateLabels(labels ...*Label) error
CreateIssues(issues ...*Issue) error
CreateComments(comments ...*Comment) error
CreatePullRequests(prs ...*PullRequest) error
CreateReviews(reviews ...*Review) error
CreateRepo(ctx context.Context, repo *Repository, opts MigrateOptions) error
CreateTopics(ctx context.Context, topic ...string) error
CreateMilestones(ctx context.Context, milestones ...*Milestone) error
CreateReleases(ctx context.Context, releases ...*Release) error
SyncTags(ctx context.Context) error
CreateLabels(ctx context.Context, labels ...*Label) error
CreateIssues(ctx context.Context, issues ...*Issue) error
CreateComments(ctx context.Context, comments ...*Comment) error
CreatePullRequests(ctx context.Context, prs ...*PullRequest) error
CreateReviews(ctx context.Context, reviews ...*Review) error
Rollback() error
Finish() error
Finish(ctx context.Context) error
Close()
}

View File

@ -17,7 +17,7 @@ type GenerateTokenRequest struct {
func GenerateActionsRunnerToken(ctx context.Context, scope string) (*ResponseText, ResponseExtra) {
reqURL := setting.LocalURL + "api/internal/actions/generate_actions_runner_token"
req := newInternalRequest(ctx, reqURL, "POST", GenerateTokenRequest{
req := newInternalRequestAPI(ctx, reqURL, "POST", GenerateTokenRequest{
Scope: scope,
})

View File

@ -85,7 +85,7 @@ type HookProcReceiveRefResult struct {
// HookPreReceive check whether the provided commits are allowed
func HookPreReceive(ctx context.Context, ownerName, repoName string, opts HookOptions) ResponseExtra {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/hook/pre-receive/%s/%s", url.PathEscape(ownerName), url.PathEscape(repoName))
req := newInternalRequest(ctx, reqURL, "POST", opts)
req := newInternalRequestAPI(ctx, reqURL, "POST", opts)
req.SetReadWriteTimeout(time.Duration(60+len(opts.OldCommitIDs)) * time.Second)
_, extra := requestJSONResp(req, &ResponseText{})
return extra
@ -94,7 +94,7 @@ func HookPreReceive(ctx context.Context, ownerName, repoName string, opts HookOp
// HookPostReceive updates services and users
func HookPostReceive(ctx context.Context, ownerName, repoName string, opts HookOptions) (*HookPostReceiveResult, ResponseExtra) {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/hook/post-receive/%s/%s", url.PathEscape(ownerName), url.PathEscape(repoName))
req := newInternalRequest(ctx, reqURL, "POST", opts)
req := newInternalRequestAPI(ctx, reqURL, "POST", opts)
req.SetReadWriteTimeout(time.Duration(60+len(opts.OldCommitIDs)) * time.Second)
return requestJSONResp(req, &HookPostReceiveResult{})
}
@ -103,7 +103,7 @@ func HookPostReceive(ctx context.Context, ownerName, repoName string, opts HookO
func HookProcReceive(ctx context.Context, ownerName, repoName string, opts HookOptions) (*HookProcReceiveResult, ResponseExtra) {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/hook/proc-receive/%s/%s", url.PathEscape(ownerName), url.PathEscape(repoName))
req := newInternalRequest(ctx, reqURL, "POST", opts)
req := newInternalRequestAPI(ctx, reqURL, "POST", opts)
req.SetReadWriteTimeout(time.Duration(60+len(opts.OldCommitIDs)) * time.Second)
return requestJSONResp(req, &HookProcReceiveResult{})
}
@ -115,7 +115,7 @@ func SetDefaultBranch(ctx context.Context, ownerName, repoName, branch string) R
url.PathEscape(repoName),
url.PathEscape(branch),
)
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
_, extra := requestJSONResp(req, &ResponseText{})
return extra
}
@ -123,7 +123,7 @@ func SetDefaultBranch(ctx context.Context, ownerName, repoName, branch string) R
// SSHLog sends ssh error log response
func SSHLog(ctx context.Context, isErr bool, msg string) error {
reqURL := setting.LocalURL + "api/internal/ssh/log"
req := newInternalRequest(ctx, reqURL, "POST", &SSHLogOption{IsError: isErr, Message: msg})
req := newInternalRequestAPI(ctx, reqURL, "POST", &SSHLogOption{IsError: isErr, Message: msg})
_, extra := requestJSONResp(req, &ResponseText{})
return extra.Error
}

View File

@ -34,7 +34,7 @@ func getClientIP() string {
return strings.Fields(sshConnEnv)[0]
}
func newInternalRequest(ctx context.Context, url, method string, body ...any) *httplib.Request {
func NewInternalRequest(ctx context.Context, url, method string) *httplib.Request {
if setting.InternalToken == "" {
log.Fatal(`The INTERNAL_TOKEN setting is missing from the configuration file: %q.
Ensure you are running in the correct environment or set the correct configuration file with -c.`, setting.CustomConf)
@ -82,13 +82,17 @@ Ensure you are running in the correct environment or set the correct configurati
},
})
}
return req
}
func newInternalRequestAPI(ctx context.Context, url, method string, body ...any) *httplib.Request {
req := NewInternalRequest(ctx, url, method)
if len(body) == 1 {
req.Header("Content-Type", "application/json")
jsonBytes, _ := json.Marshal(body[0])
req.Body(jsonBytes)
} else if len(body) > 1 {
log.Fatal("Too many arguments for newInternalRequest")
log.Fatal("Too many arguments for newInternalRequestAPI")
}
req.SetTimeout(10*time.Second, 60*time.Second)

View File

@ -14,7 +14,7 @@ import (
func UpdatePublicKeyInRepo(ctx context.Context, keyID, repoID int64) error {
// Ask for running deliver hook and test pull request tasks.
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/ssh/%d/update/%d", keyID, repoID)
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
_, extra := requestJSONResp(req, &ResponseText{})
return extra.Error
}
@ -24,7 +24,7 @@ func UpdatePublicKeyInRepo(ctx context.Context, keyID, repoID int64) error {
func AuthorizedPublicKeyByContent(ctx context.Context, content string) (*ResponseText, ResponseExtra) {
// Ask for running deliver hook and test pull request tasks.
reqURL := setting.LocalURL + "api/internal/ssh/authorized_keys"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
req.Param("content", content)
return requestJSONResp(req, &ResponseText{})
}

View File

@ -23,7 +23,7 @@ type Email struct {
func SendEmail(ctx context.Context, subject, message string, to []string) (*ResponseText, ResponseExtra) {
reqURL := setting.LocalURL + "api/internal/mail/send"
req := newInternalRequest(ctx, reqURL, "POST", Email{
req := newInternalRequestAPI(ctx, reqURL, "POST", Email{
Subject: subject,
Message: message,
To: to,

View File

@ -18,21 +18,21 @@ import (
// Shutdown calls the internal shutdown function
func Shutdown(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/shutdown"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Shutting down")
}
// Restart calls the internal restart function
func Restart(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/restart"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Restarting")
}
// ReloadTemplates calls the internal reload-templates function
func ReloadTemplates(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/reload-templates"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Reloaded")
}
@ -45,7 +45,7 @@ type FlushOptions struct {
// FlushQueues calls the internal flush-queues function
func FlushQueues(ctx context.Context, timeout time.Duration, nonBlocking bool) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/flush-queues"
req := newInternalRequest(ctx, reqURL, "POST", FlushOptions{Timeout: timeout, NonBlocking: nonBlocking})
req := newInternalRequestAPI(ctx, reqURL, "POST", FlushOptions{Timeout: timeout, NonBlocking: nonBlocking})
if timeout > 0 {
req.SetReadWriteTimeout(timeout + 10*time.Second)
}
@ -55,28 +55,28 @@ func FlushQueues(ctx context.Context, timeout time.Duration, nonBlocking bool) R
// PauseLogging pauses logging
func PauseLogging(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/pause-logging"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Logging Paused")
}
// ResumeLogging resumes logging
func ResumeLogging(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/resume-logging"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Logging Restarted")
}
// ReleaseReopenLogging releases and reopens logging files
func ReleaseReopenLogging(ctx context.Context) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/release-and-reopen-logging"
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Logging Restarted")
}
// SetLogSQL sets database logging
func SetLogSQL(ctx context.Context, on bool) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/set-log-sql?on=" + strconv.FormatBool(on)
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Log SQL setting set")
}
@ -91,7 +91,7 @@ type LoggerOptions struct {
// AddLogger adds a logger
func AddLogger(ctx context.Context, logger, writer, mode string, config map[string]any) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/add-logger"
req := newInternalRequest(ctx, reqURL, "POST", LoggerOptions{
req := newInternalRequestAPI(ctx, reqURL, "POST", LoggerOptions{
Logger: logger,
Writer: writer,
Mode: mode,
@ -103,7 +103,7 @@ func AddLogger(ctx context.Context, logger, writer, mode string, config map[stri
// RemoveLogger removes a logger
func RemoveLogger(ctx context.Context, logger, writer string) ResponseExtra {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/remove-logger/%s/%s", url.PathEscape(logger), url.PathEscape(writer))
req := newInternalRequest(ctx, reqURL, "POST")
req := newInternalRequestAPI(ctx, reqURL, "POST")
return requestJSONClientMsg(req, "Removed")
}
@ -111,7 +111,7 @@ func RemoveLogger(ctx context.Context, logger, writer string) ResponseExtra {
func Processes(ctx context.Context, out io.Writer, flat, noSystem, stacktraces, json bool, cancel string) ResponseExtra {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/processes?flat=%t&no-system=%t&stacktraces=%t&json=%t&cancel-pid=%s", flat, noSystem, stacktraces, json, url.QueryEscape(cancel))
req := newInternalRequest(ctx, reqURL, "GET")
req := newInternalRequestAPI(ctx, reqURL, "GET")
callback := func(resp *http.Response, extra *ResponseExtra) {
_, extra.Error = io.Copy(out, resp.Body)
}

View File

@ -24,7 +24,7 @@ type RestoreParams struct {
func RestoreRepo(ctx context.Context, repoDir, ownerName, repoName string, units []string, validation bool) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/restore_repo"
req := newInternalRequest(ctx, reqURL, "POST", RestoreParams{
req := newInternalRequestAPI(ctx, reqURL, "POST", RestoreParams{
RepoDir: repoDir,
OwnerName: ownerName,
RepoName: repoName,

View File

@ -23,7 +23,7 @@ type KeyAndOwner struct {
// ServNoCommand returns information about the provided key
func ServNoCommand(ctx context.Context, keyID int64) (*asymkey_model.PublicKey, *user_model.User, error) {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/serv/none/%d", keyID)
req := newInternalRequest(ctx, reqURL, "GET")
req := newInternalRequestAPI(ctx, reqURL, "GET")
keyAndOwner, extra := requestJSONResp(req, &KeyAndOwner{})
if extra.HasError() {
return nil, nil, extra.Error
@ -58,6 +58,6 @@ func ServCommand(ctx context.Context, keyID int64, ownerName, repoName string, m
reqURL += fmt.Sprintf("&verb=%s", url.QueryEscape(verb))
}
}
req := newInternalRequest(ctx, reqURL, "GET")
req := newInternalRequestAPI(ctx, reqURL, "GET")
return requestJSONResp(req, &ServCommandResults{})
}

View File

@ -57,3 +57,12 @@ type EditOrgOption struct {
Visibility string `json:"visibility" binding:"In(,public,limited,private)"`
RepoAdminChangeTeamAccess *bool `json:"repo_admin_change_team_access"`
}
// RenameOrgOption options when renaming an organization
type RenameOrgOption struct {
// New username for this org. This name cannot be in use yet by any other user.
//
// required: true
// unique: true
NewName string `json:"new_name" binding:"Required"`
}

View File

@ -32,3 +32,36 @@ type ActionTaskResponse struct {
Entries []*ActionTask `json:"workflow_runs"`
TotalCount int64 `json:"total_count"`
}
// CreateActionWorkflowDispatch represents the payload for triggering a workflow dispatch event
// swagger:model
type CreateActionWorkflowDispatch struct {
// required: true
// example: refs/heads/main
Ref string `json:"ref" binding:"Required"`
// required: false
Inputs map[string]string `json:"inputs,omitempty"`
}
// ActionWorkflow represents an ActionWorkflow
type ActionWorkflow struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
// swagger:strfmt date-time
CreatedAt time.Time `json:"created_at"`
// swagger:strfmt date-time
UpdatedAt time.Time `json:"updated_at"`
URL string `json:"url"`
HTMLURL string `json:"html_url"`
BadgeURL string `json:"badge_url"`
// swagger:strfmt date-time
DeletedAt time.Time `json:"deleted_at,omitempty"`
}
// ActionWorkflowResponse returns a list of ActionWorkflow
type ActionWorkflowResponse struct {
Workflows []*ActionWorkflow `json:"workflows"`
TotalCount int64 `json:"total_count"`
}
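
For context, a hedged client-side sketch (not part of this commit; the import path and the input values are assumptions) of the JSON produced for a workflow_dispatch request: only "ref" is required, and "inputs" is omitted from the JSON when empty.

// Hypothetical client-side snippet, not part of this commit.
package example

import (
	"encoding/json"
	"fmt"

	api "code.gitea.io/gitea/modules/structs" // assumed import path for the structs above
)

// buildDispatchPayload marshals the workflow_dispatch request body.
func buildDispatchPayload() ([]byte, error) {
	payload := api.CreateActionWorkflowDispatch{
		Ref:    "refs/heads/main",
		Inputs: map[string]string{"environment": "staging"}, // optional, illustrative values
	}
	return json.Marshal(payload)
}

func example() {
	body, err := buildDispatchPayload()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"ref":"refs/heads/main","inputs":{"environment":"staging"}}
}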

View File

@ -69,7 +69,7 @@ func NewFuncMap() template.FuncMap {
// time / number / format
"FileSize": base.FileSize,
"CountFmt": countFmt,
"Sec2Time": util.SecToHours,
"Sec2Hour": util.SecToHours,
"TimeEstimateString": timeEstimateString,

View File

@ -36,6 +36,22 @@ func (w SilentWrap) Unwrap() error {
return w.Err
}
type LocaleWrap struct {
err error
TrKey string
TrArgs []any
}
// Error returns the message
func (w LocaleWrap) Error() string {
return w.err.Error()
}
// Unwrap returns the underlying error
func (w LocaleWrap) Unwrap() error {
return w.err
}
// NewSilentWrapErrorf returns an error that formats as the given text but unwraps as the provided error
func NewSilentWrapErrorf(unwrap error, message string, args ...any) error {
if len(args) == 0 {
@ -63,3 +79,16 @@ func NewAlreadyExistErrorf(message string, args ...any) error {
func NewNotExistErrorf(message string, args ...any) error {
return NewSilentWrapErrorf(ErrNotExist, message, args...)
}
// ErrWrapLocale wraps an err with a translation key and arguments
func ErrWrapLocale(err error, trKey string, trArgs ...any) error {
return LocaleWrap{err: err, TrKey: trKey, TrArgs: trArgs}
}
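// ErrAsLocale unwraps err and returns the contained LocaleWrap, or nil if err does not wrap one.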
func ErrAsLocale(err error) *LocaleWrap {
var e LocaleWrap
if errors.As(err, &e) {
return &e
}
return nil
}
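A short usage sketch for these wrappers, assuming translation.Locale exposes TrString as used elsewhere in this diff; the translation key and helper names are hypothetical.
package example

import (
	"code.gitea.io/gitea/modules/translation"
	"code.gitea.io/gitea/modules/util"
)

// loadThing is a hypothetical loader that tags its not-found error with a translation key.
func loadThing(id int64) error {
	err := util.NewNotExistErrorf("thing %d does not exist", id)
	return util.ErrWrapLocale(err, "repo.issues.not_found", id)
}

// localizedMessage shows how a handler could recover the translated text.
func localizedMessage(locale translation.Locale, err error) string {
	if le := util.ErrAsLocale(err); le != nil {
		return locale.TrString(le.TrKey, le.TrArgs...)
	}
	return err.Error()
}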

View File

@ -11,16 +11,20 @@ import (
// SecToHours converts an amount of seconds to a human-readable hours string.
// This is stable for planning and managing timesheets.
// Here it only supports hours and minutes, because a work day could contain 6 or 7 or 8 hours.
// If the duration is less than 1 minute, it will be shown as seconds.
func SecToHours(durationVal any) string {
duration, _ := ToInt64(durationVal)
hours := duration / 3600
minutes := (duration / 60) % 60
seconds, _ := ToInt64(durationVal)
hours := seconds / 3600
minutes := (seconds / 60) % 60
formattedTime := ""
formattedTime = formatTime(hours, "hour", formattedTime)
formattedTime = formatTime(minutes, "minute", formattedTime)
// The formatTime() function always appends a space at the end. This will be trimmed
if formattedTime == "" && seconds > 0 {
formattedTime = formatTime(seconds, "second", "")
}
return strings.TrimRight(formattedTime, " ")
}
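The formatTime helper used above is outside this hunk; a plausible reconstruction, assuming it pluralizes the unit name and appends a trailing space (which is what the TrimRight call and the tests in the next hunk imply), is:
// Illustrative reconstruction only, not the actual helper from this file (assumes "fmt" is imported).
func formatTime(value int64, name, formattedTime string) string {
	if value == 1 {
		formattedTime += fmt.Sprintf("1 %s ", name)
	} else if value > 1 {
		formattedTime += fmt.Sprintf("%d %ss ", value, name)
	}
	return formattedTime
}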

View File

@ -22,4 +22,7 @@ func TestSecToHours(t *testing.T) {
assert.Equal(t, "156 hours 30 minutes", SecToHours(6*day+12*hour+30*minute+18*second))
assert.Equal(t, "98 hours 16 minutes", SecToHours(4*day+2*hour+16*minute+58*second))
assert.Equal(t, "672 hours", SecToHours(4*7*day))
assert.Equal(t, "1 second", SecToHours(1))
assert.Equal(t, "2 seconds", SecToHours(2))
assert.Equal(t, "", SecToHours(nil)) // old behavior, empty means no output
}

View File

@ -78,7 +78,7 @@ func GetInclude(field reflect.StructField) string {
return getRuleBody(field, "Include(")
}
// Validate validate TODO:
// Validate validates the submitted form data
func Validate(errs binding.Errors, data map[string]any, f Form, l translation.Locale) binding.Errors {
if errs.Len() == 0 {
return errs

options/gitignore/Flutter (new normal file, 119 lines)
View File

@ -0,0 +1,119 @@
# Miscellaneous
*.class
*.lock
*.log
*.pyc
*.swp
.buildlog/
.history
# Flutter repo-specific
/bin/cache/
/bin/internal/bootstrap.bat
/bin/internal/bootstrap.sh
/bin/mingit/
/dev/benchmarks/mega_gallery/
/dev/bots/.recipe_deps
/dev/bots/android_tools/
/dev/devicelab/ABresults*.json
/dev/docs/doc/
/dev/docs/flutter.docs.zip
/dev/docs/lib/
/dev/docs/pubspec.yaml
/dev/integration_tests/**/xcuserdata
/dev/integration_tests/**/Pods
/packages/flutter/coverage/
version
analysis_benchmark.json
# packages file containing multi-root paths
.packages.generated
# Flutter/Dart/Pub related
**/doc/api/
.dart_tool/
.flutter-plugins
.flutter-plugins-dependencies
**/generated_plugin_registrant.dart
.packages
.pub-preload-cache/
.pub/
build/
flutter_*.png
linked_*.ds
unlinked.ds
unlinked_spec.ds
# Android related
**/android/**/gradle-wrapper.jar
.gradle/
**/android/captures/
**/android/gradlew
**/android/gradlew.bat
**/android/local.properties
**/android/**/GeneratedPluginRegistrant.java
**/android/key.properties
*.jks
# iOS/XCode related
**/ios/**/*.mode1v3
**/ios/**/*.mode2v3
**/ios/**/*.moved-aside
**/ios/**/*.pbxuser
**/ios/**/*.perspectivev3
**/ios/**/*sync/
**/ios/**/.sconsign.dblite
**/ios/**/.tags*
**/ios/**/.vagrant/
**/ios/**/DerivedData/
**/ios/**/Icon?
**/ios/**/Pods/
**/ios/**/.symlinks/
**/ios/**/profile
**/ios/**/xcuserdata
**/ios/.generated/
**/ios/Flutter/.last_build_id
**/ios/Flutter/App.framework
**/ios/Flutter/Flutter.framework
**/ios/Flutter/Flutter.podspec
**/ios/Flutter/Generated.xcconfig
**/ios/Flutter/ephemeral
**/ios/Flutter/app.flx
**/ios/Flutter/app.zip
**/ios/Flutter/flutter_assets/
**/ios/Flutter/flutter_export_environment.sh
**/ios/ServiceDefinitions.json
**/ios/Runner/GeneratedPluginRegistrant.*
# macOS
**/Flutter/ephemeral/
**/Pods/
**/macos/Flutter/GeneratedPluginRegistrant.swift
**/macos/Flutter/ephemeral
**/xcuserdata/
# Windows
**/windows/flutter/generated_plugin_registrant.cc
**/windows/flutter/generated_plugin_registrant.h
**/windows/flutter/generated_plugins.cmake
# Linux
**/linux/flutter/generated_plugin_registrant.cc
**/linux/flutter/generated_plugin_registrant.h
**/linux/flutter/generated_plugins.cmake
# Coverage
coverage/
# Symbols
app.*.symbols
# Exceptions to above rules.
!**/ios/**/default.mode1v3
!**/ios/**/default.mode2v3
!**/ios/**/default.pbxuser
!**/ios/**/default.perspectivev3
!/packages/flutter_tools/test/data/dart_dependencies_test/**/.packages
!/dev/ci/**/Gemfile.lock

View File

@ -1,3 +1,6 @@
# Ignore build outputs from performing a nix-build or `nix build` command
result
result-*
# Ignore automatically generated direnv output
.direnv

View File

@ -0,0 +1,16 @@
# Excludes Obsidian workspace cache and plugins. All notes and core obsidian
# configuration files are tracked by Git.
# The current application UI state (DOM layout, recently-opened files, etc.) is
# stored in these files (separate for desktop and mobile) so you can resume
# your session seamlessly after a restart. If you want to track UI state, use
# the Workspaces core plugin instead of relying on these files.
.obsidian/workspace.json
.obsidian/workspace-mobile.json
# Obsidian plugins are stored under .obsidian/plugins/$plugin_name. They
# contain metadata (manifest.json), application code (main.js), stylesheets
# (styles.css), and user-configuration data (data.json).
# We want to exclude all plugin-related files, so we can exclude everything
# under this directory.
.obsidian/plugins/**/*

View File

@ -0,0 +1,38 @@
# Excludes Obsidian workspace cache and plugin code, but retains plugin
# configuration. All notes and user-controlled configuration files are tracked
# by Git.
#
# !!! WARNING !!!
#
# Community plugins may store sensitive secrets in their data.json files. By
# including these files, those secrets may be tracked in your Git repository.
#
# To ignore configurations for specific plugins, add a line like this after the
# contents of this file (order is important):
# .obsidian/plugins/{{plugin_name}}/data.json
#
# Alternatively, ensure that you are treating your entire Git repository as
# sensitive data, since it may contain secrets, or may have contained them in
# past commits. Understand your threat profile, and make the decision
# appropriate for yourself. If in doubt, err on the side of not including
# plugin configuration. Use one of the alternative gitignore files instead:
# * NotesOnly.gitignore
# * NotesAndCoreConfiguration.gitignore
# The current application UI state (DOM layout, recently-opened files, etc.) is
# stored in these files (separate for desktop and mobile) so you can resume
# your session seamlessly after a restart. If you want to track UI state, use
# the Workspaces core plugin instead of relying on these files.
.obsidian/workspace.json
.obsidian/workspace-mobile.json
# Obsidian plugins are stored under .obsidian/plugins/$plugin_name. They
# contain metadata (manifest.json), application code (main.js), stylesheets
# (styles.css), and user-configuration data (data.json).
# We only want to track data.json, so we:
# 1. exclude everything under the plugins directory recursively,
# 2. unignore the plugin directories themselves, which then allows us to
# 3. unignore the data.json files
.obsidian/plugins/**/*
!.obsidian/plugins/*/
!.obsidian/plugins/*/data.json

View File

@ -0,0 +1,4 @@
# Excludes all Obsidian-related configuration. All notes are tracked by Git.
# All Obsidian configuration and runtime state is stored here
.obsidian/**/*

View File

@ -54,6 +54,7 @@ webauthn_reload=Znovu načíst
repository=Repozitář
organization=Organizace
mirror=Zrcadlo
issue_milestone=Milník
new_repo=Nový repozitář
new_migrate=Nová migrace
new_mirror=Nové zrcadlo
@ -1253,6 +1254,7 @@ labels=Štítky
org_labels_desc=Štítky na úrovni organizace, které mohou být použity se <strong>všemi repozitáři</strong> v rámci této organizace
org_labels_desc_manage=spravovat
milestone=Milník
milestones=Milníky
commits=Commity
commit=Commit
@ -2873,6 +2875,7 @@ view_as_role=Zobrazit jako: %s
view_as_public_hint=Prohlížíte README jako veřejný uživatel.
view_as_member_hint=Prohlížíte README jako člen této organizace.
[admin]
maintenance=Údržba
dashboard=Přehled

View File

@ -54,6 +54,7 @@ webauthn_reload=Neu laden
repository=Repository
organization=Organisation
mirror=Mirror
issue_milestone=Meilenstein
new_repo=Neues Repository
new_migrate=Neue Migration
new_mirror=Neuer Mirror
@ -1247,6 +1248,7 @@ labels=Label
org_labels_desc=Labels der Organisationsebene, die mit <strong>allen Repositories</strong> in dieser Organisation verwendet werden können
org_labels_desc_manage=verwalten
milestone=Meilenstein
milestones=Meilensteine
commits=Commits
commit=Commit
@ -2854,6 +2856,7 @@ teams.invite.by=Von %s eingeladen
teams.invite.description=Bitte klicke auf die folgende Schaltfläche, um dem Team beizutreten.
[admin]
maintenance=Wartung
dashboard=Dashboard

View File

@ -53,6 +53,7 @@ webauthn_reload=Ανανέωση
repository=Αποθετήριο
organization=Οργανισμός
mirror=Αντίγραφο
issue_milestone=Ορόσημο
new_repo=Νέο Αποθετήριο
new_migrate=Νέα Μεταφορά
new_mirror=Νέο Είδωλο
@ -1119,6 +1120,7 @@ labels=Σήματα
org_labels_desc=Τα σήματα στο επίπεδο οργανισμού, που μπορούν να χρησιμοποιηθούν με <strong>όλα τα αποθετήρια</strong> κάτω από αυτόν τον οργανισμό
org_labels_desc_manage=διαχείριση
milestone=Ορόσημο
milestones=Ορόσημα
commits=Υποβολές
commit=Υποβολή
@ -2590,6 +2592,7 @@ teams.invite.by=Προσκλήθηκε από %s
teams.invite.description=Παρακαλώ κάντε κλικ στον παρακάτω σύνδεσμο για συμμετοχή στην ομάδα.
[admin]
dashboard=Πίνακας Ελέγχου
identity_access=Ταυτότητα & Πρόσβαση

View File

@ -54,6 +54,7 @@ webauthn_reload = Reload
repository = Repository
organization = Organization
mirror = Mirror
issue_milestone = Milestone
new_repo = New Repository
new_migrate = New Migration
new_mirror = New Mirror
@ -384,6 +385,13 @@ show_only_public = Showing only public
issues.in_your_repos = In your repositories
guide_title = No Activity
guide_desc = You are currently not following any repositories or users, so there is no content to display. You can explore repositories or users of interest from the links below.
explore_repos = Explore repositories
explore_users = Explore users
empty_org = There are no organizations yet.
empty_repo = There are no repositories yet.
[explore]
repos = Repositories
users = Users
@ -1253,6 +1261,7 @@ labels = Labels
org_labels_desc = Organization level labels that can be used with <strong>all repositories</strong> under this organization
org_labels_desc_manage = manage
milestone = Milestone
milestones = Milestones
commits = Commits
commit = Commit
@ -2329,6 +2338,8 @@ settings.event_fork = Fork
settings.event_fork_desc = Repository forked.
settings.event_wiki = Wiki
settings.event_wiki_desc = Wiki page created, renamed, edited or deleted.
settings.event_statuses = Statuses
settings.event_statuses_desc = Commit Status updated from the API.
settings.event_release = Release
settings.event_release_desc = Release published, updated or deleted in a repository.
settings.event_push = Push
@ -2876,6 +2887,15 @@ view_as_role = View as: %s
view_as_public_hint = You are viewing the README as a public user.
view_as_member_hint = You are viewing the README as a member of this organization.
worktime = Worktime
worktime.date_range_start = Start date
worktime.date_range_end = End date
worktime.query = Query
worktime.time = Time
worktime.by_repositories = By repositories
worktime.by_milestones = By milestones
worktime.by_members = By members
[admin]
maintenance = Maintenance
dashboard = Dashboard

View File

@ -52,6 +52,7 @@ webauthn_reload=Recargar
repository=Repositorio
organization=Organización
mirror=Réplica
issue_milestone=Hito
new_repo=Nuevo repositorio
new_migrate=Nueva migración
new_mirror=Nueva réplica
@ -1109,6 +1110,7 @@ labels=Etiquetas
org_labels_desc=Etiquetas de nivel de la organización que pueden ser utilizadas con <strong>todos los repositorios</strong> bajo esta organización
org_labels_desc_manage=gestionar
milestone=Hito
milestones=Hitos
commits=Commits
commit=Commit
@ -2571,6 +2573,7 @@ teams.invite.by=Invitado por %s
teams.invite.description=Por favor, haga clic en el botón de abajo para unirse al equipo.
[admin]
dashboard=Panel de control
identity_access=Identidad y acceso

View File

@ -1993,6 +1993,7 @@ teams.all_repositories_write_permission_desc=این تیم دسترسی<strong>
teams.all_repositories_admin_permission_desc=این تیم دسترسی<strong> مدیر </strong> به <strong> مخازن همه</strong> را می بخشد: اعضا می توانند مخازن را بخواند، همکار و مخزن اضافه کنند.
[admin]
dashboard=پیشخوان
users=حساب کاربران

View File

@ -49,6 +49,7 @@ webauthn_reload=Päivitä
repository=Repo
organization=Organisaatio
mirror=Peili
issue_milestone=Merkkipaalu
new_repo=Uusi repo
new_migrate=Uusi migraatio
new_mirror=Uusi peilaus
@ -720,6 +721,7 @@ projects=Projektit
packages=Paketit
labels=Tunnisteet
milestone=Merkkipaalu
milestones=Merkkipaalut
commits=Commitit
commit=Commit
@ -1361,6 +1363,7 @@ teams.members.none=Ei jäseniä tässä tiimissä.
teams.all_repositories=Kaikki repot
[admin]
dashboard=Kojelauta
users=Käyttäjätilit

View File

@ -54,6 +54,7 @@ webauthn_reload=Recharger
repository=Dépôt
organization=Organisation
mirror=Miroir
issue_milestone=Jalon
new_repo=Nouveau dépôt
new_migrate=Nouvelle migration
new_mirror=Nouveau miroir
@ -1172,7 +1173,7 @@ migrate_items_releases=Publications
migrate_repo=Migrer le dépôt
migrate.clone_address=Migrer/Cloner depuis une URL
migrate.clone_address_desc=L'URL HTTP(S) ou Git "clone" d'un dépôt existant
migrate.github_token_desc=Vous pouvez mettre un ou plusieurs jetons séparés par des virgules ici pour rendre la migration plus rapide en raison de la limite de débit de l'API GitHub. ATTENTION : Abuser de cette fonctionnalité peut enfreindre la politique du fournisseur de services et entraîner un blocage de compte.
migrate.github_token_desc=Vous pouvez mettre un ou plusieurs jetons séparés par des virgules ici pour rendre la migration plus rapide et contourner la limite de débit de l'API GitHub. ATTENTION : Abuser de cette fonctionnalité peut enfreindre la politique du fournisseur de service et entraîner un blocage de votre compte.
migrate.clone_local_path=ou un chemin serveur local
migrate.permission_denied=Vous n'êtes pas autorisé à importer des dépôts locaux.
migrate.permission_denied_blocked=Vous ne pouvez pas importer depuis des hôtes interdits, veuillez demander à l'administrateur de vérifier les paramètres ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS/BLOCKED_DOMAINS.
@ -1253,6 +1254,7 @@ labels=Labels
org_labels_desc=Les labels d'une organisation peuvent être utilisés avec <strong>tous les dépôts</strong> de cette organisation.
org_labels_desc_manage=gérer
milestone=Jalon
milestones=Jalons
commits=Révisions
commit=Révision
@ -1345,6 +1347,8 @@ editor.new_branch_name_desc=Nouveau nom de la branche…
editor.cancel=Annuler
editor.filename_cannot_be_empty=Le nom de fichier ne peut être vide.
editor.filename_is_invalid=Le nom du fichier est invalide : "%s".
editor.commit_email=Courriel de la révision
editor.invalid_commit_email=Le courriel pour la révision n'est pas valide.
editor.branch_does_not_exist=La branche "%s" n'existe pas dans ce dépôt.
editor.branch_already_exists=La branche "%s" existe déjà dans ce dépôt.
editor.directory_is_a_file=Le nom de dossier "%s" est déjà utilisé comme nom de fichier dans ce dépôt.
@ -1562,12 +1566,12 @@ issues.action_assignee=Assigné à
issues.action_assignee_no_select=Pas d'assignataire
issues.action_check=Cocher/Décocher
issues.action_check_all=Cocher/Décocher tous les éléments
issues.opened_by=créé %[1]s par <a href="%[2]s">%[3]s</a>
pulls.merged_by=par <a href="%[2]s">%[3]s</a> fusionné %[1]s.
pulls.merged_by_fake=par %[2]s fusionné %[1]s.
issues.closed_by=de <a href="%[2]s">%[3]s</a>, clôt %[1]s
issues.opened_by_fake=%[1]s ouvert par %[2]s
issues.closed_by_fake=de %[2]s, clôt %[1]s
issues.opened_by=ouvert(e) par <a href="%[2]s">%[3]s</a> %[1]s
pulls.merged_by=par <a href="%[2]s">%[3]s</a> a été fusionnée %[1]s
pulls.merged_by_fake=par %[2]s a été fusionnée %[1]s
issues.closed_by=par <a href="%[2]s">%[3]s</a> a été fermé(e) %[1]s
issues.opened_by_fake=ouvert(e) par %[2]s %[1]s
issues.closed_by_fake=par %[2]s a été fermé(e) %[1]s
issues.previous=Précédent
issues.next=Suivant
issues.open_title=Ouvert
@ -1735,8 +1739,8 @@ issues.dependency.added_dependency=`a créé une dépendance %s.`
issues.dependency.removed_dependency=`a supprimé une dépendance %s.`
issues.dependency.pr_closing_blockedby=La fermeture de cette demande d'ajout est bloquée par les tickets suivants
issues.dependency.issue_closing_blockedby=La fermeture de ce ticket est bloquée par les tickets suivants
issues.dependency.issue_close_blocks=Cette demande d'ajout empêche la clôture des tickets suivants
issues.dependency.pr_close_blocks=Cette demande d'ajout empêche la clôture des tickets suivants
issues.dependency.issue_close_blocks=Ce ticket empêche la clôture des tickets suivants
issues.dependency.pr_close_blocks=Cette demande d'ajout empêche la clôture des tickets suivants
issues.dependency.issue_close_blocked=Vous devez fermer tous les tickets qui bloquent ce ticket avant de pouvoir le fermer.
issues.dependency.issue_batch_close_blocked=Impossible de fermer tous les tickets que vous avez choisis, car le ticket #%d a toujours des dépendances ouvertes.
issues.dependency.pr_close_blocked=Vous devez fermer tous les tickets qui bloquent cette demande d'ajout avant de pouvoir la fusionner.
@ -2873,6 +2877,7 @@ view_as_role=Voir en tant que %s
view_as_public_hint=Vous visualisez le README en tant qu'utilisateur public.
view_as_member_hint=Vous visualisez le README en tant que membre de cette organisation.
[admin]
maintenance=Maintenance
dashboard=Tableau de bord

View File

@ -54,6 +54,7 @@ webauthn_reload=Athlódáil
repository=Stór
organization=Eagraíocht
mirror=Scáthán
issue_milestone=Cloch Mhíle
new_repo=Stór Nua
new_migrate=Imirce Nua
new_mirror=Scáthán Nua
@ -1253,6 +1254,7 @@ labels=Lipéid
org_labels_desc=Lipéid ar leibhéal eagraíochta is féidir a úsáid le <strong>gach stóras</strong> faoin eagraíocht seo
org_labels_desc_manage=bainistigh
milestone=Cloch Mhíle
milestones=Clocha míle
commits=Tiomáintí
commit=Tiomantas
@ -1345,6 +1347,8 @@ editor.new_branch_name_desc=Ainm brainse nua…
editor.cancel=Cealaigh
editor.filename_cannot_be_empty=Ní féidir ainm an chomhaid a bheith folamh.
editor.filename_is_invalid=Tá ainm an chomhaid neamhbhailí: "%s".
editor.commit_email=Tiomantas ríomhphost
editor.invalid_commit_email=Tá an ríomhphost don ghealltanas neamhbhailí.
editor.branch_does_not_exist=Níl brainse "%s" ann sa stóras seo.
editor.branch_already_exists=Tá brainse "%s" ann cheana féin sa stóras seo.
editor.directory_is_a_file=Úsáidtear ainm eolaire "%s" cheana féin mar ainm comhaid sa stóras seo.
@ -2326,6 +2330,8 @@ settings.event_fork=Forc
settings.event_fork_desc=Forcadh stóras.
settings.event_wiki=Vicí
settings.event_wiki_desc=Leathanach Vicí cruthaithe, athainmnithe, curtha in eagar nó scriosta.
settings.event_statuses=Stádais
settings.event_statuses_desc=Nuashonraíodh Stádas Commit ón API.
settings.event_release=Scaoileadh
settings.event_release_desc=Scaoileadh foilsithe, nuashonraithe nó scriosta i stóras.
settings.event_push=Brúigh
@ -2873,6 +2879,15 @@ view_as_role=Féach mar: %s
view_as_public_hint=Tá tú ag féachaint ar an README mar úsáideoir poiblí.
view_as_member_hint=Tá tú ag féachaint ar an README mar bhall den eagraíocht seo.
worktime=Am oibre
worktime.date_range_start=Dáta tosaithe
worktime.date_range_end=Dáta deiridh
worktime.query=Ceist
worktime.time=Am
worktime.by_repositories=De réir stórtha
worktime.by_milestones=De réir clocha míle
worktime.by_members=Ag baill
[admin]
maintenance=Cothabháil
dashboard=Deais

View File

@ -1229,6 +1229,7 @@ teams.specific_repositories=Meghatározott tárolók
teams.all_repositories=Minden tároló
[admin]
dashboard=Műszerfal
users=Felhasználói fiókok

View File

@ -1084,6 +1084,7 @@ teams.delete_team_success=Tim sudah di hapus.
teams.repositories=Tim repositori
[admin]
dashboard=Dasbor
organizations=Organisasi

View File

@ -49,6 +49,7 @@ webauthn_reload=Endurhlaða
repository=Hugbúnaðarsafn
organization=Stofnun
mirror=Speglun
issue_milestone=Tímamót
new_repo=Nýtt Hugbúnaðarsafn
new_migrate=Nýr Flutningur
new_mirror=Ný Speglun
@ -652,6 +653,7 @@ projects=Verkefni
packages=Pakkar
labels=Skýringar
milestone=Tímamót
milestones=Tímamót
commits=Framlög
commit=Framlag
@ -1137,6 +1139,7 @@ teams.update_settings=Uppfæra Stillingar
teams.all_repositories=Öll hugbúnaðarsöfn
[admin]
repositories=Hugbúnaðarsöfn
config=Stilling

View File

@ -50,6 +50,7 @@ webauthn_reload=Ricarica
repository=Repository
organization=Organizzazione
mirror=Mirror
issue_milestone=Traguardo
new_repo=Nuovo Repository
new_migrate=Nuova Migrazione
new_mirror=Nuovo Mirror
@ -942,6 +943,7 @@ labels=Etichette
org_labels_desc=Etichette a livello di organizzazione che possono essere utilizzate con <strong>tutti i repository</strong> sotto questa organizzazione
org_labels_desc_manage=gestisci
milestone=Traguardo
milestones=Traguardi
commits=Commit
commit=Commit
@ -2154,6 +2156,7 @@ teams.all_repositories_write_permission_desc=Questo team concede <strong>permess
teams.all_repositories_admin_permission_desc=Questo team concede a <strong>Amministratore</strong> l'accesso a <strong>tutte le repository</strong>: i membri possono leggere, pushare e aggiungere collaboratori alle repository.
[admin]
dashboard=Pannello di Controllo
users=Account utenti

View File

@ -54,6 +54,7 @@ webauthn_reload=リロード
repository=リポジトリ
organization=組織
mirror=ミラー
issue_milestone=マイルストーン
new_repo=新しいリポジトリ
new_migrate=新しい移行
new_mirror=新しいミラー
@ -1253,6 +1254,7 @@ labels=ラベル
org_labels_desc=組織で定義されているラベル (組織の<strong>すべてのリポジトリ</strong>で使用可能なもの)
org_labels_desc_manage=編集
milestone=マイルストーン
milestones=マイルストーン
commits=コミット
commit=コミット
@ -2873,6 +2875,7 @@ view_as_role=表示: %s
view_as_public_hint=READMEを公開ユーザーとして見ています。
view_as_member_hint=READMEをこの組織のメンバーとして見ています。
[admin]
maintenance=メンテナンス
dashboard=ダッシュボード

View File

@ -1191,6 +1191,7 @@ teams.add_duplicate_users=사용자가 이미 팀 멤버입니다.
teams.members.none=이 팀에 멤버가 없습니다.
[admin]
dashboard=대시보드
users=사용자 계정

View File

@ -54,6 +54,7 @@ webauthn_reload=Pārlādēt
repository=Repozitorijs
organization=Organizācija
mirror=Spogulis
issue_milestone=Atskaites punktus
new_repo=Jauns repozitorijs
new_migrate=Jauna migrācija
new_mirror=Jauns spogulis
@ -1125,6 +1126,7 @@ labels=Iezīmes
org_labels_desc=Organizācijas līmeņa iezīmes var tikt izmantotas <strong>visiem repozitorijiem</strong> šajā organizācijā
org_labels_desc_manage=pārvaldīt
milestone=Atskaites punktus
milestones=Atskaites punkti
commits=Revīzijas
commit=Revīzija
@ -2593,6 +2595,7 @@ teams.invite.by=Uzaicināja %s
teams.invite.description=Nospiediet pogu zemāk, lai pievienotos komandai.
[admin]
dashboard=Infopanelis
self_check=Pašpārbaude

View File

@ -50,6 +50,7 @@ webauthn_reload=Vernieuwen
repository=Repository
organization=Organisatie
mirror=Kopie
issue_milestone=Mijlpaal
new_repo=Nieuwe repository
new_migrate=Nieuwe migratie
new_mirror=Nieuwe kopie
@ -940,6 +941,7 @@ labels=Labels
org_labels_desc=Organisatielabel dat gebruikt kan worden met <strong>alle repositories</strong> onder deze organisatie
org_labels_desc_manage=beheren
milestone=Mijlpaal
milestones=Mijlpalen
commits=Commits
commit=Commit
@ -2055,6 +2057,7 @@ teams.all_repositories_helper=Team heeft toegang tot alle repositories. Door dit
teams.all_repositories_read_permission_desc=Dit team heeft <strong>Lees</strong> toegang tot <strong>alle repositories</strong>: leden kunnen repositories bekijken en klonen.
[admin]
dashboard=Overzicht
users=Gebruikersacount

View File

@ -1934,6 +1934,7 @@ teams.all_repositories_write_permission_desc=Ten zespół nadaje uprawnienie <st
teams.all_repositories_admin_permission_desc=Ten zespół nadaje uprawnienia <strong>Administratora</strong> do <strong>wszystkich repozytoriów</strong>: jego członkowie mogą odczytywać, przesyłać oraz dodawać innych współtwórców do repozytoriów.
[admin]
dashboard=Pulpit
users=Konta użytkownika

View File

@ -52,6 +52,7 @@ webauthn_reload=Recarregar
repository=Repositório
organization=Organização
mirror=Espelhamento
issue_milestone=Marco
new_repo=Novo repositório
new_migrate=Nova migração
new_mirror=Novo espelhamento
@ -1119,6 +1120,7 @@ labels=Etiquetas
org_labels_desc=Rótulos de nível de organização que podem ser usados em <strong>todos os repositórios</strong> sob esta organização
org_labels_desc_manage=gerenciar
milestone=Marco
milestones=Marcos
commits=Commits
commit=Commit
@ -2551,6 +2553,7 @@ teams.invite.by=Convidado por %s
teams.invite.description=Por favor, clique no botão abaixo para se juntar à equipe.
[admin]
dashboard=Painel
identity_access=Identidade e acesso

View File

@ -54,6 +54,7 @@ webauthn_reload=Recarregar
repository=Repositório
organization=Organização
mirror=Réplica
issue_milestone=Etapa
new_repo=Novo repositório
new_migrate=Nova migração
new_mirror=Nova réplica
@ -1253,6 +1254,7 @@ labels=Rótulos
org_labels_desc=Rótulos ao nível da organização que podem ser usados em <strong>todos os repositórios</strong> desta organização
org_labels_desc_manage=gerir
milestone=Etapa
milestones=Etapas
commits=Cometimentos
commit=Cometimento
@ -1345,6 +1347,8 @@ editor.new_branch_name_desc=Nome do novo ramo…
editor.cancel=Cancelar
editor.filename_cannot_be_empty=O nome do ficheiro não pode estar em branco.
editor.filename_is_invalid=O nome do ficheiro é inválido: "%s".
editor.commit_email=Email do cometimento
editor.invalid_commit_email=O email do cometimento é inválido.
editor.branch_does_not_exist=O ramo "%s" não existe neste repositório.
editor.branch_already_exists=O ramo "%s" já existe neste repositório.
editor.directory_is_a_file=O nome da pasta "%s" já é usado como um nome de ficheiro neste repositório.
@ -2326,6 +2330,8 @@ settings.event_fork=Derivar
settings.event_fork_desc=Feita a derivação do repositório.
settings.event_wiki=Wiki
settings.event_wiki_desc=Página do wiki criada, renomeada, editada ou eliminada.
settings.event_statuses=Estados
settings.event_statuses_desc=Estado do cometimento modificado através da API.
settings.event_release=Lançamento
settings.event_release_desc=Lançamento publicado, modificado ou eliminado num repositório.
settings.event_push=Enviar
@ -2873,6 +2879,15 @@ view_as_role=Ver como: %s
view_as_public_hint=Está a ver o README como um utilizador público.
view_as_member_hint=Está a ver o README como um membro desta organização.
worktime=Tempo de trabalho
worktime.date_range_start=Data do início
worktime.date_range_end=Data do fim
worktime.query=Consulta
worktime.time=Tempo
worktime.by_repositories=Por repositórios
worktime.by_milestones=Por etapas
worktime.by_members=Por membros
[admin]
maintenance=Manutenção
dashboard=Painel de controlo

View File

@ -52,6 +52,7 @@ webauthn_reload=Обновить
repository=Репозиторий
organization=Организация
mirror=Зеркало
issue_milestone=Этап
new_repo=Новый репозиторий
new_migrate=Новая миграция
new_mirror=Новое зеркало
@ -1100,6 +1101,7 @@ labels=Метки
org_labels_desc=Метки уровня организации, которые можно использовать с <strong>всеми репозиториями</strong> в этой организации
org_labels_desc_manage=управлять
milestone=Этап
milestones=Этапы
commits=коммитов
commit=коммит
@ -2540,6 +2542,7 @@ teams.invite.by=Приглашен(а) %s
teams.invite.description=Нажмите на кнопку ниже, чтобы присоединиться к команде.
[admin]
dashboard=Панель
identity_access=Идентификация и доступ

View File

@ -1955,6 +1955,7 @@ teams.all_repositories_write_permission_desc=මෙම කණ්ඩායම ප
teams.all_repositories_admin_permission_desc=මෙම කණ්ඩායම ප්රදානය කරයි <strong>පරිපාලක</strong> වෙත ප්රවේශය <strong>සියලු ගබඩාවන්ට</strong>: සාමාජිකයින්ට කියවීමට, තල්ලු කිරීමට සහ ගබඩාවන්ට සහයෝගීකයින් එකතු කිරීමට.
[admin]
dashboard=උපකරණ පුවරුව
users=පරිශීලක ගිණුම්

View File

@ -53,6 +53,7 @@ webauthn_reload=Znovu načítať
repository=Repozitár
organization=Organizácia
mirror=Zrkadlo
issue_milestone=Míľnik
new_repo=Nový repozitár
new_migrate=Nová migrácia
new_mirror=Nové zrkadlo
@ -967,6 +968,7 @@ labels=Štítky
org_labels_desc=Štítky na úrovni organizácie, ktoré možno použiť so <strong>všetkými repozitármi</strong> v rámci tejto organizácie
org_labels_desc_manage=spravovať
milestone=Míľnik
milestones=Míľniky
commits=Commitov
release=Vydanie
@ -1236,6 +1238,7 @@ teams.all_repositories_write_permission_desc=Tomuto tímu je pridelený prístup
teams.all_repositories_admin_permission_desc=Tomuto tímu je pridelený <strong>Admin</strong> prístup ku <strong>všetkým repozitárom</strong>: členovia môžu prezerať, nahrávať do repozitárov a pridávať do nich spolupracovníkov.
[admin]
repositories=Repozitáre
hooks=Webhooky

View File

@ -1592,6 +1592,7 @@ teams.all_repositories_write_permission_desc=Detta team beviljar <strong>Skriv</
teams.all_repositories_admin_permission_desc=Detta team beviljar <strong>Admin</strong>-rättigheter till <strong>alla utvecklingskataloger</strong>: medlemmar kan läsa från, pusha till och lägga till kollaboratörer för utvecklingskatalogerna.
[admin]
dashboard=Instrumentpanel
users=Användarkonto

View File

@ -54,6 +54,7 @@ webauthn_reload=Yeniden yükle
repository=Depo
organization=Organizasyon
mirror=Yansı
issue_milestone=Dönüm noktası
new_repo=Yeni Depo
new_migrate=Yeni Göç
new_mirror=Yeni Yansı
@ -78,7 +79,7 @@ forks=Çatallar
activities=Etkinlikler
pull_requests=Değişiklik İstekleri
issues=Konular
milestones=Kilometre Taşları
milestones=Dönüm noktaları
ok=Tamam
cancel=İptal
@ -1128,7 +1129,7 @@ migrate_options_lfs_endpoint.description.local=Yerel bir sunucu yolu da destekle
migrate_options_lfs_endpoint.placeholder=Boş bırakılırsa, uç nokta klon URL'sinden türetilecektir
migrate_items=Göç Öğeleri
migrate_items_wiki=Wiki
migrate_items_milestones=Kilometre Taşları
migrate_items_milestones=Dönüm noktaları
migrate_items_labels=Etiketler
migrate_items_issues=Konular
migrate_items_pullrequests=Değişiklik İstekleri
@ -1212,6 +1213,7 @@ labels=Etiketler
org_labels_desc=Bu organizasyon altında <strong>tüm depolarla</strong> kullanılabilen organizasyon düzeyinde etiketler
org_labels_desc_manage=yönet
milestone=Dönüm noktası
milestones=Kilometre Taşları
commits=İşleme
commit=İşle
@ -2752,6 +2754,7 @@ teams.invite.by=%s tarafından davet edildi
teams.invite.description=Takıma katılmak için aşağıdaki düğmeye tıklayın.
[admin]
maintenance=Bakım
dashboard=Pano

View File

@ -37,6 +37,7 @@ webauthn_reload=Оновити
repository=Репозиторій
organization=Організація
mirror=Дзеркало
issue_milestone=Етап
new_repo=Новий репозиторій
new_migrate=Нова міграція
new_mirror=Нове дзеркало
@ -889,6 +890,7 @@ labels=Мітки
org_labels_desc=Мітки рівня організації можуть використовуватися <strong>в усіх репозиторіях</strong> цієї організації
org_labels_desc_manage=керувати
milestone=Етап
milestones=Етап
commits=Коміти
commit=Коміт
@ -2003,6 +2005,7 @@ teams.all_repositories_write_permission_desc=Ця команда надає до
teams.all_repositories_admin_permission_desc=Ця команда надає дозвіл <strong>Адміністрування</strong> для <strong>всіх репозиторіїв</strong>: учасники можуть переглядати, виконувати push та додавати співробітників.
[admin]
dashboard=Панель управління
users=Облікові записи користувачів

View File

@ -54,6 +54,7 @@ webauthn_reload=重新加载
repository=仓库
organization=组织
mirror=镜像
issue_milestone=里程碑
new_repo=创建仓库
new_migrate=迁移外部仓库
new_mirror=创建新的镜像
@ -1247,6 +1248,7 @@ labels=标签
org_labels_desc=组织级别的标签,可以被本组织下的 <strong>所有仓库</strong> 使用
org_labels_desc_manage=管理
milestone=里程碑
milestones=里程碑
commits=提交
commit=提交
@ -2854,6 +2856,7 @@ teams.invite.by=邀请人 %s
teams.invite.description=请点击下面的按钮加入团队。
[admin]
maintenance=维护
dashboard=管理面板

View File

@ -685,6 +685,7 @@ teams.delete_team_success=該團隊已被刪除。
teams.repositories=團隊儲存庫
[admin]
dashboard=控制面版
organizations=組織管理

View File

@ -54,6 +54,7 @@ webauthn_reload=重新載入
repository=儲存庫
organization=組織
mirror=鏡像
issue_milestone=里程碑
new_repo=新增儲存庫
new_migrate=遷移外部儲存庫
new_mirror=新鏡像
@ -1241,6 +1242,7 @@ labels=標籤
org_labels_desc=組織層級標籤可用於此組織下的<strong>所有存儲庫</strong>。
org_labels_desc_manage=管理
milestone=里程碑
milestones=里程碑
commits=提交歷史
commit=提交
@ -2845,6 +2847,7 @@ teams.invite.by=邀請人 %s
teams.invite.description=請點擊下方按鈕加入團隊。
[admin]
maintenance=維護
dashboard=資訊主頁

Binary file not shown.

Before: 1.9 KiB

public/assets/img/svg/gitea-feishu.svg (generated normal file, 1 line)
View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="7 7 26 26" class="svg gitea-feishu" width="16" height="16" aria-hidden="true"><path fill="#00d6b9" d="m21.069 20.504.063-.06.125-.122.085-.084.256-.254.348-.344.299-.296.281-.278.293-.289.269-.266.374-.37.218-.206.419-.359.404-.306.598-.386.617-.33.606-.265.348-.127.177-.058a14.8 14.8 0 0 0-2.793-5.603 1.34 1.34 0 0 0-1.047-.502H12.221a.201.201 0 0 0-.119.364 31.5 31.5 0 0 1 8.943 10.162l.025-.023z"/><path fill="#3370ff" d="M16.791 30c5.57 0 10.423-3.074 12.955-7.618q.133-.239.258-.484a6 6 0 0 1-.425.699 6 6 0 0 1-.17.23 6 6 0 0 1-.225.274q-.092.105-.188.206a6 6 0 0 1-.407.384 6 6 0 0 1-.24.195 7 7 0 0 1-.292.21q-.094.065-.191.122c-.097.057-.134.081-.204.119q-.21.116-.428.215a6 6 0 0 1-.385.157 6 6 0 0 1-.43.138 6 6 0 0 1-.661.143 6 6 0 0 1-.491.055 6.125 6.125 0 0 1-1.543-.085 7 7 0 0 1-.38-.079l-.2-.051-.555-.155-.275-.081-.41-.125-.334-.107-.317-.104-.215-.073-.26-.091-.186-.066-.367-.134-.212-.081-.284-.11-.299-.119-.193-.079-.24-.1-.185-.078-.192-.084-.166-.073-.152-.067-.153-.07-.159-.073-.2-.093-.208-.099-.222-.108-.189-.093a31.2 31.2 0 0 1-8.822-6.583.202.202 0 0 0-.349.138l.005 9.52v.773c0 .448.222.87.595 1.118A14.75 14.75 0 0 0 16.791 30"/><path fill="#133c92" d="m29.746 22.382.051-.093zm.231-.435.014-.025.007-.012z"/><path fill="#133c9a" d="M33.151 16.582a8.45 8.45 0 0 0-3.744-.869 8.5 8.5 0 0 0-2.303.317l-.252.075-.177.058-.348.127-.606.265-.617.33-.598.386-.404.306-.419.359-.218.206-.374.37-.269.266-.293.289-.281.278-.299.296-.348.344-.256.254-.085.084-.125.122-.063.06-.095.09-.105.099a15 15 0 0 1-3.072 2.175l.2.093.159.073.153.07.152.067.166.073.192.084.185.078.24.1.193.079.299.119.284.11.212.081.367.134.186.066.26.09.215.073.317.104.334.107.41.125.275.081.555.155.2.051.379.079.433.062.585.037.525-.014.491-.055a6 6 0 0 0 .66-.143l.43-.138.385-.158.427-.215.204-.119.191-.122.292-.21.24-.195.407-.384.188-.206.225-.274.17-.23a6 6 0 0 0 .421-.693l.144-.288 1.305-2.599-.003.006a8.1 8.1 0 0 1 1.697-2.439z"/></svg>

After: 2.0 KiB

View File

@ -477,26 +477,16 @@ func RenameUser(ctx *context.APIContext) {
return
}
oldName := ctx.ContextUser.Name
newName := web.GetForm(ctx).(*api.RenameUserOption).NewName
// Check if user name has been changed
// Check if username has been changed
if err := user_service.RenameUser(ctx, ctx.ContextUser, newName); err != nil {
switch {
case user_model.IsErrUserAlreadyExist(err):
ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("form.username_been_taken"))
case db.IsErrNameReserved(err):
ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_reserved", newName))
case db.IsErrNamePatternNotAllowed(err):
ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_pattern_not_allowed", newName))
case db.IsErrNameCharsNotAllowed(err):
ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_chars_not_allowed", newName))
default:
if user_model.IsErrUserAlreadyExist(err) || db.IsErrNameReserved(err) || db.IsErrNamePatternNotAllowed(err) || db.IsErrNameCharsNotAllowed(err) {
ctx.Error(http.StatusUnprocessableEntity, "", err)
} else {
ctx.ServerError("ChangeUserName", err)
}
return
}
log.Trace("User name changed: %s -> %s", oldName, newName)
ctx.Status(http.StatusNoContent)
}

View File

@ -268,12 +268,12 @@ func checkTokenPublicOnly() func(ctx *context.APIContext) {
return
}
case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryUser):
if ctx.ContextUser != nil && ctx.ContextUser.IsUser() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
if ctx.ContextUser != nil && ctx.ContextUser.IsTokenAccessAllowed() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public users")
return
}
case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryActivityPub):
if ctx.ContextUser != nil && ctx.ContextUser.IsUser() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
if ctx.ContextUser != nil && ctx.ContextUser.IsTokenAccessAllowed() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public activitypub")
return
}
@ -580,6 +580,16 @@ func reqWebhooksEnabled() func(ctx *context.APIContext) {
}
}
// reqStarsEnabled requires Starring to be enabled in the config.
func reqStarsEnabled() func(ctx *context.APIContext) {
return func(ctx *context.APIContext) {
if setting.Repository.DisableStars {
ctx.Error(http.StatusForbidden, "", "stars disabled by administrator")
return
}
}
}
func orgAssignment(args ...bool) func(ctx *context.APIContext) {
var (
assignOrg bool
@ -995,7 +1005,7 @@ func Routes() *web.Router {
m.Get("/{target}", user.CheckFollowing)
})
m.Get("/starred", user.GetStarredRepos)
m.Get("/starred", reqStarsEnabled(), user.GetStarredRepos)
m.Get("/subscriptions", user.GetWatchedRepos)
}, context.UserAssignmentAPI(), checkTokenPublicOnly())
@ -1086,7 +1096,7 @@ func Routes() *web.Router {
m.Put("", user.Star)
m.Delete("", user.Unstar)
}, repoAssignment(), checkTokenPublicOnly())
}, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository))
}, reqStarsEnabled(), tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository))
m.Get("/times", repo.ListMyTrackedTimes)
m.Get("/stopwatches", repo.GetStopwatches)
m.Get("/subscriptions", user.GetMyWatchedRepos)
@ -1145,11 +1155,17 @@ func Routes() *web.Router {
m.Post("/accept", repo.AcceptTransfer)
m.Post("/reject", repo.RejectTransfer)
}, reqToken())
addActionsRoutes(
m,
reqOwner(),
repo.NewAction(),
)
addActionsRoutes(m, reqOwner(), repo.NewAction()) // it adds the routes for secrets/variables and runner management
m.Group("/actions/workflows", func() {
m.Get("", repo.ActionsListRepositoryWorkflows)
m.Get("/{workflow_id}", repo.ActionsGetWorkflow)
m.Put("/{workflow_id}/disable", reqRepoWriter(unit.TypeActions), repo.ActionsDisableWorkflow)
m.Put("/{workflow_id}/enable", reqRepoWriter(unit.TypeActions), repo.ActionsEnableWorkflow)
m.Post("/{workflow_id}/dispatches", reqRepoWriter(unit.TypeActions), bind(api.CreateActionWorkflowDispatch{}), repo.ActionsDispatchWorkflow)
}, context.ReferencesGitRepo(), reqToken(), reqRepoReader(unit.TypeActions))
m.Group("/hooks/git", func() {
m.Combo("").Get(repo.ListGitHooks)
m.Group("/{id}", func() {
@ -1248,7 +1264,7 @@ func Routes() *web.Router {
m.Post("/markup", reqToken(), bind(api.MarkupOption{}), misc.Markup)
m.Post("/markdown", reqToken(), bind(api.MarkdownOption{}), misc.Markdown)
m.Post("/markdown/raw", reqToken(), misc.MarkdownRaw)
m.Get("/stargazers", repo.ListStargazers)
m.Get("/stargazers", reqStarsEnabled(), repo.ListStargazers)
m.Get("/subscribers", repo.ListSubscribers)
m.Group("/subscription", func() {
m.Get("", user.IsWatching)
@ -1530,6 +1546,7 @@ func Routes() *web.Router {
m.Combo("").Get(org.Get).
Patch(reqToken(), reqOrgOwnership(), bind(api.EditOrgOption{}), org.Edit).
Delete(reqToken(), reqOrgOwnership(), org.Delete)
m.Post("/rename", reqToken(), reqOrgOwnership(), bind(api.RenameOrgOption{}), org.Rename)
m.Combo("/repos").Get(user.ListOrgRepos).
Post(reqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo)
m.Group("/members", func() {

View File

@ -450,7 +450,11 @@ func (Action) UpdateVariable(ctx *context.APIContext) {
if opt.Name == "" {
opt.Name = ctx.PathParam("variablename")
}
if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
v.Name = opt.Name
v.Data = opt.Value
if _, err := actions_service.UpdateVariableNameData(ctx, v); err != nil {
if errors.Is(err, util.ErrInvalidArgument) {
ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
} else {

View File

@ -315,6 +315,44 @@ func Get(ctx *context.APIContext) {
ctx.JSON(http.StatusOK, org)
}
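// Rename renames an organization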
func Rename(ctx *context.APIContext) {
// swagger:operation POST /orgs/{org}/rename organization renameOrg
// ---
// summary: Rename an organization
// produces:
// - application/json
// parameters:
// - name: org
// in: path
// description: existing org name
// type: string
// required: true
// - name: body
// in: body
// required: true
// schema:
// "$ref": "#/definitions/RenameOrgOption"
// responses:
// "204":
// "$ref": "#/responses/empty"
// "403":
// "$ref": "#/responses/forbidden"
// "422":
// "$ref": "#/responses/validationError"
form := web.GetForm(ctx).(*api.RenameOrgOption)
orgUser := ctx.Org.Organization.AsUser()
if err := user_service.RenameUser(ctx, orgUser, form.NewName); err != nil {
if user_model.IsErrUserAlreadyExist(err) || db.IsErrNameReserved(err) || db.IsErrNamePatternNotAllowed(err) || db.IsErrNameCharsNotAllowed(err) {
ctx.Error(http.StatusUnprocessableEntity, "RenameOrg", err)
} else {
ctx.ServerError("RenameOrg", err)
}
return
}
ctx.Status(http.StatusNoContent)
}
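A client-side sketch of the new rename endpoint, assuming a token with owner rights on the organization; host, names, and the token are placeholders.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// POST /api/v1/orgs/{org}/rename with a RenameOrgOption body.
	body := bytes.NewBufferString(`{"new_name": "new-org-name"}`)
	req, _ := http.NewRequest("POST", "https://gitea.example.com/api/v1/orgs/old-org-name/rename", body)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token <access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}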
// Edit changes an organization's information
func Edit(ctx *context.APIContext) {
// swagger:operation PATCH /orgs/{org} organization orgEdit

View File

@ -6,6 +6,7 @@ package repo
import (
"errors"
"net/http"
"strings"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
@ -19,6 +20,8 @@ import (
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/convert"
secret_service "code.gitea.io/gitea/services/secrets"
"github.com/nektos/act/pkg/model"
)
// ListActionsSecrets lists a repo's actions secrets
@ -414,7 +417,11 @@ func (Action) UpdateVariable(ctx *context.APIContext) {
if opt.Name == "" {
opt.Name = ctx.PathParam("variablename")
}
if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
v.Name = opt.Name
v.Data = opt.Value
if _, err := actions_service.UpdateVariableNameData(ctx, v); err != nil {
if errors.Is(err, util.ErrInvalidArgument) {
ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
} else {
@ -581,3 +588,270 @@ func ListActionTasks(ctx *context.APIContext) {
ctx.JSON(http.StatusOK, &res)
}
func ActionsListRepositoryWorkflows(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/actions/workflows repository ActionsListRepositoryWorkflows
// ---
// summary: List repository workflows
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/ActionWorkflowList"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
// "404":
// "$ref": "#/responses/notFound"
// "422":
// "$ref": "#/responses/validationError"
// "500":
// "$ref": "#/responses/error"
workflows, err := actions_service.ListActionWorkflows(ctx)
if err != nil {
ctx.Error(http.StatusInternalServerError, "ListActionWorkflows", err)
return
}
ctx.JSON(http.StatusOK, &api.ActionWorkflowResponse{Workflows: workflows, TotalCount: int64(len(workflows))})
}
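// ActionsGetWorkflow returns a single Actions workflow of a repository.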
func ActionsGetWorkflow(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/actions/workflows/{workflow_id} repository ActionsGetWorkflow
// ---
// summary: Get a workflow
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: workflow_id
// in: path
// description: id of the workflow
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/ActionWorkflow"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
// "404":
// "$ref": "#/responses/notFound"
// "422":
// "$ref": "#/responses/validationError"
// "500":
// "$ref": "#/responses/error"
workflowID := ctx.PathParam("workflow_id")
workflow, err := actions_service.GetActionWorkflow(ctx, workflowID)
if err != nil {
if errors.Is(err, util.ErrNotExist) {
ctx.Error(http.StatusNotFound, "GetActionWorkflow", err)
} else {
ctx.Error(http.StatusInternalServerError, "GetActionWorkflow", err)
}
return
}
ctx.JSON(http.StatusOK, workflow)
}
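// ActionsDisableWorkflow disables an Actions workflow of a repository.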
func ActionsDisableWorkflow(ctx *context.APIContext) {
// swagger:operation PUT /repos/{owner}/{repo}/actions/workflows/{workflow_id}/disable repository ActionsDisableWorkflow
// ---
// summary: Disable a workflow
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: workflow_id
// in: path
// description: id of the workflow
// type: string
// required: true
// responses:
// "204":
// description: No Content
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
// "404":
// "$ref": "#/responses/notFound"
// "422":
// "$ref": "#/responses/validationError"
workflowID := ctx.PathParam("workflow_id")
err := actions_service.EnableOrDisableWorkflow(ctx, workflowID, false)
if err != nil {
if errors.Is(err, util.ErrNotExist) {
ctx.Error(http.StatusNotFound, "DisableActionWorkflow", err)
} else {
ctx.Error(http.StatusInternalServerError, "DisableActionWorkflow", err)
}
return
}
ctx.Status(http.StatusNoContent)
}
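// ActionsDispatchWorkflow triggers a workflow_dispatch event for an Actions workflow.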
func ActionsDispatchWorkflow(ctx *context.APIContext) {
// swagger:operation POST /repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches repository ActionsDispatchWorkflow
// ---
// summary: Create a workflow dispatch event
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: workflow_id
// in: path
// description: id of the workflow
// type: string
// required: true
// - name: body
// in: body
// schema:
// "$ref": "#/definitions/CreateActionWorkflowDispatch"
// responses:
// "204":
// description: No Content
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
// "404":
// "$ref": "#/responses/notFound"
// "422":
// "$ref": "#/responses/validationError"
workflowID := ctx.PathParam("workflow_id")
opt := web.GetForm(ctx).(*api.CreateActionWorkflowDispatch)
if opt.Ref == "" {
ctx.Error(http.StatusUnprocessableEntity, "MissingWorkflowParameter", util.NewInvalidArgumentErrorf("ref is required parameter"))
return
}
err := actions_service.DispatchActionWorkflow(ctx, ctx.Doer, ctx.Repo.Repository, ctx.Repo.GitRepo, workflowID, opt.Ref, func(workflowDispatch *model.WorkflowDispatch, inputs map[string]any) error {
if strings.Contains(ctx.Req.Header.Get("Content-Type"), "form-urlencoded") {
// The chi framework's "Binding" doesn't support binding form map values into a map[string]string,
// so we have to read the `inputs[key]` values from the form manually
for name, config := range workflowDispatch.Inputs {
value := ctx.FormString("inputs["+name+"]", config.Default)
inputs[name] = value
}
} else {
for name, config := range workflowDispatch.Inputs {
value, ok := opt.Inputs[name]
if ok {
inputs[name] = value
} else {
inputs[name] = config.Default
}
}
}
return nil
})
if err != nil {
if errors.Is(err, util.ErrNotExist) {
ctx.Error(http.StatusNotFound, "DispatchActionWorkflow", err)
} else if errors.Is(err, util.ErrPermissionDenied) {
ctx.Error(http.StatusForbidden, "DispatchActionWorkflow", err)
} else {
ctx.Error(http.StatusInternalServerError, "DispatchActionWorkflow", err)
}
return
}
ctx.Status(http.StatusNoContent)
}
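A sketch of calling the dispatch endpoint from a Go client, assuming the workflow file name is used as {workflow_id} as in the swagger path above; owner, repo, inputs, and the token are placeholders.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The input keys must match the workflow's workflow_dispatch inputs; unset keys fall back to their defaults.
	payload := map[string]any{
		"ref":    "refs/heads/main",
		"inputs": map[string]string{"deploy_target": "staging"},
	}
	b, _ := json.Marshal(payload)

	url := "https://gitea.example.com/api/v1/repos/owner/repo/actions/workflows/build.yml/dispatches"
	req, _ := http.NewRequest("POST", url, bytes.NewReader(b))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token <access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" when the dispatch is accepted
}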
func ActionsEnableWorkflow(ctx *context.APIContext) {
// swagger:operation PUT /repos/{owner}/{repo}/actions/workflows/{workflow_id}/enable repository ActionsEnableWorkflow
// ---
// summary: Enable a workflow
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: workflow_id
// in: path
// description: id of the workflow
// type: string
// required: true
// responses:
// "204":
// description: No Content
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
// "404":
// "$ref": "#/responses/notFound"
// "409":
// "$ref": "#/responses/conflict"
// "422":
// "$ref": "#/responses/validationError"
workflowID := ctx.PathParam("workflow_id")
err := actions_service.EnableOrDisableWorkflow(ctx, workflowID, true)
if err != nil {
if errors.Is(err, util.ErrNotExist) {
ctx.Error(http.StatusNotFound, "EnableActionWorkflow", err)
} else {
ctx.Error(http.StatusInternalServerError, "EnableActionWorkflow", err)
}
return
}
ctx.Status(http.StatusNoContent)
}

View File

@ -44,6 +44,8 @@ func ListStargazers(ctx *context.APIContext) {
// "$ref": "#/responses/UserList"
// "404":
// "$ref": "#/responses/notFound"
// "403":
// "$ref": "#/responses/forbidden"
stargazers, err := repo_model.GetStargazers(ctx, ctx.Repo.Repository, utils.GetListOptions(ctx))
if err != nil {

View File

@ -32,3 +32,17 @@ type swaggerResponseVariableList struct {
// in:body
Body []api.ActionVariable `json:"body"`
}
// ActionWorkflow
// swagger:response ActionWorkflow
type swaggerResponseActionWorkflow struct {
// in:body
Body api.ActionWorkflow `json:"body"`
}
// ActionWorkflowList
// swagger:response ActionWorkflowList
type swaggerResponseActionWorkflowList struct {
// in:body
Body []api.ActionWorkflow `json:"body"`
}

View File

@ -208,6 +208,12 @@ type swaggerParameterBodies struct {
// in:body
CreateVariableOption api.CreateVariableOption
// in:body
RenameOrgOption api.RenameOrgOption
// in:body
CreateActionWorkflowDispatch api.CreateActionWorkflowDispatch
// in:body
UpdateVariableOption api.UpdateVariableOption
}

View File

@ -212,7 +212,11 @@ func UpdateVariable(ctx *context.APIContext) {
if opt.Name == "" {
opt.Name = ctx.PathParam("variablename")
}
if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
v.Name = opt.Name
v.Data = opt.Value
if _, err := actions_service.UpdateVariableNameData(ctx, v); err != nil {
if errors.Is(err, util.ErrInvalidArgument) {
ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
} else {

View File

@ -66,6 +66,8 @@ func GetStarredRepos(ctx *context.APIContext) {
// "$ref": "#/responses/RepositoryList"
// "404":
// "$ref": "#/responses/notFound"
// "403":
// "$ref": "#/responses/forbidden"
private := ctx.ContextUser.ID == ctx.Doer.ID
repos, err := getStarredRepos(ctx, ctx.ContextUser, private)
@ -97,6 +99,8 @@ func GetMyStarredRepos(ctx *context.APIContext) {
// responses:
// "200":
// "$ref": "#/responses/RepositoryList"
// "403":
// "$ref": "#/responses/forbidden"
repos, err := getStarredRepos(ctx, ctx.Doer, true)
if err != nil {
@ -128,6 +132,8 @@ func IsStarring(ctx *context.APIContext) {
// "$ref": "#/responses/empty"
// "404":
// "$ref": "#/responses/notFound"
// "403":
// "$ref": "#/responses/forbidden"
if repo_model.IsStaring(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID) {
ctx.Status(http.StatusNoContent)
@ -193,6 +199,8 @@ func Unstar(ctx *context.APIContext) {
// "$ref": "#/responses/empty"
// "404":
// "$ref": "#/responses/notFound"
// "403":
// "$ref": "#/responses/forbidden"
err := repo_model.StarRepo(ctx, ctx.Doer, ctx.Repo.Repository, false)
if err != nil {

View File

@ -34,7 +34,7 @@ func Home(ctx *context.Context) {
}
ctx.SetPathParam("org", uname)
context.HandleOrgAssignment(ctx)
context.OrgAssignment(context.OrgAssignmentOptions{})(ctx)
if ctx.Written() {
return
}

View File

@ -0,0 +1,74 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package org
import (
"net/http"
"time"
"code.gitea.io/gitea/models/organization"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/services/context"
)
const tplByRepos templates.TplName = "org/worktime"
// parseOrgTimes parses the date range from the request and applies sensible
// defaults (start of the current month through today) when none is given.
func parseOrgTimes(ctx *context.Context) (unixFrom, unixTo int64) {
rangeFrom := ctx.FormString("from")
rangeTo := ctx.FormString("to")
if rangeFrom == "" {
rangeFrom = time.Now().Format("2006-01") + "-01" // defaults to start of current month
}
if rangeTo == "" {
rangeTo = time.Now().Format("2006-01-02") // defaults to today
}
ctx.Data["RangeFrom"] = rangeFrom
ctx.Data["RangeTo"] = rangeTo
timeFrom, err := time.Parse("2006-01-02", rangeFrom)
if err != nil {
ctx.ServerError("time.Parse", err)
}
timeTo, err := time.Parse("2006-01-02", rangeTo)
if err != nil {
ctx.ServerError("time.Parse", err)
}
unixFrom = timeFrom.Unix()
unixTo = timeTo.Add(1440*time.Minute - 1*time.Second).Unix() // humans expect that we include the ending day too
return unixFrom, unixTo
}
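// Worktime renders the organization worktime page, aggregated by repositories, milestones, or members.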
func Worktime(ctx *context.Context) {
ctx.Data["PageIsOrgTimes"] = true
unixFrom, unixTo := parseOrgTimes(ctx)
if ctx.Written() {
return
}
worktimeBy := ctx.FormString("by")
ctx.Data["WorktimeBy"] = worktimeBy
var worktimeSumResult any
var err error
if worktimeBy == "milestones" {
worktimeSumResult, err = organization.GetWorktimeByMilestones(ctx.Org.Organization, unixFrom, unixTo)
ctx.Data["WorktimeByMilestones"] = true
} else if worktimeBy == "members" {
worktimeSumResult, err = organization.GetWorktimeByMembers(ctx.Org.Organization, unixFrom, unixTo)
ctx.Data["WorktimeByMembers"] = true
} else /* by repos */ {
worktimeSumResult, err = organization.GetWorktimeByRepos(ctx.Org.Organization, unixFrom, unixTo)
ctx.Data["WorktimeByRepos"] = true
}
if err != nil {
ctx.ServerError("GetWorktime", err)
return
}
ctx.Data["WorktimeSumResult"] = worktimeSumResult
ctx.HTML(http.StatusOK, tplByRepos)
}
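A standalone illustration of the date-range arithmetic above: the end date is pushed to the last second of its day so the range includes the whole ending day. The dates are arbitrary.
package main

import (
	"fmt"
	"time"
)

func main() {
	timeFrom, _ := time.Parse("2006-01-02", "2025-02-01")
	timeTo, _ := time.Parse("2006-01-02", "2025-02-10")

	unixFrom := timeFrom.Unix()
	// Same trick as parseOrgTimes: add 24 hours minus one second so the ending day counts in full.
	unixTo := timeTo.Add(1440*time.Minute - 1*time.Second).Unix()

	fmt.Println(time.Unix(unixFrom, 0).UTC()) // 2025-02-01 00:00:00 +0000 UTC
	fmt.Println(time.Unix(unixTo, 0).UTC())   // 2025-02-10 23:59:59 +0000 UTC
}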

View File

@ -20,8 +20,6 @@ import (
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
git_model "code.gitea.io/gitea/models/git"
"code.gitea.io/gitea/models/perm"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/actions"
@ -30,16 +28,13 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web"
actions_service "code.gitea.io/gitea/services/actions"
context_module "code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/convert"
"github.com/nektos/act/pkg/jobparser"
"github.com/nektos/act/pkg/model"
"xorm.io/builder"
)
@ -281,86 +276,100 @@ func ViewPost(ctx *context_module.Context) {
resp.State.CurrentJob.Steps = make([]*ViewJobStep, 0) // marshal to '[]' instead of 'null' in json
resp.Logs.StepsLog = make([]*ViewStepLog, 0) // marshal to '[]' instead of 'null' in json
if task != nil {
steps := actions.FullSteps(task)
for _, v := range steps {
resp.State.CurrentJob.Steps = append(resp.State.CurrentJob.Steps, &ViewJobStep{
Summary: v.Name,
Duration: v.Duration().String(),
Status: v.Status.String(),
})
}
for _, cursor := range req.LogCursors {
if !cursor.Expanded {
continue
}
step := steps[cursor.Step]
// if task log is expired, return a consistent log line
if task.LogExpired {
if cursor.Cursor == 0 {
resp.Logs.StepsLog = append(resp.Logs.StepsLog, &ViewStepLog{
Step: cursor.Step,
Cursor: 1,
Lines: []*ViewStepLogLine{
{
Index: 1,
Message: ctx.Locale.TrString("actions.runs.expire_log_message"),
// Timestamp doesn't mean anything when the log is expired.
// Set it to the task's updated time since it's probably the time when the log has expired.
Timestamp: float64(task.Updated.AsTime().UnixNano()) / float64(time.Second),
},
},
Started: int64(step.Started),
})
}
continue
}
logLines := make([]*ViewStepLogLine, 0) // marshal to '[]' instead of 'null' in json
index := step.LogIndex + cursor.Cursor
validCursor := cursor.Cursor >= 0 &&
// !(cursor.Cursor < step.LogLength) when the frontend tries to fetch next line before it's ready.
// So return the same cursor and empty lines to let the frontend retry.
cursor.Cursor < step.LogLength &&
// !(index < int64(len(task.LogIndexes))) when task data is older than step data.
// It can be fixed by making sure write/read tasks and steps in the same transaction,
// but it's easier to just treat it as fetching the next line before it's ready.
index < int64(len(task.LogIndexes))
if validCursor {
length := step.LogLength - cursor.Cursor
offset := task.LogIndexes[index]
logRows, err := actions.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
if err != nil {
ctx.ServerError("actions.ReadLogs", err)
return
}
for i, row := range logRows {
logLines = append(logLines, &ViewStepLogLine{
Index: cursor.Cursor + int64(i) + 1, // start at 1
Message: row.Content,
Timestamp: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
})
}
}
resp.Logs.StepsLog = append(resp.Logs.StepsLog, &ViewStepLog{
Step: cursor.Step,
Cursor: cursor.Cursor + int64(len(logLines)),
Lines: logLines,
Started: int64(step.Started),
})
steps, logs, err := convertToViewModel(ctx, req.LogCursors, task)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
resp.State.CurrentJob.Steps = append(resp.State.CurrentJob.Steps, steps...)
resp.Logs.StepsLog = append(resp.Logs.StepsLog, logs...)
}
ctx.JSON(http.StatusOK, resp)
}
func convertToViewModel(ctx *context_module.Context, cursors []LogCursor, task *actions_model.ActionTask) ([]*ViewJobStep, []*ViewStepLog, error) {
var viewJobs []*ViewJobStep
var logs []*ViewStepLog
steps := actions.FullSteps(task)
for _, v := range steps {
viewJobs = append(viewJobs, &ViewJobStep{
Summary: v.Name,
Duration: v.Duration().String(),
Status: v.Status.String(),
})
}
for _, cursor := range cursors {
if !cursor.Expanded {
continue
}
step := steps[cursor.Step]
// if task log is expired, return a consistent log line
if task.LogExpired {
if cursor.Cursor == 0 {
logs = append(logs, &ViewStepLog{
Step: cursor.Step,
Cursor: 1,
Lines: []*ViewStepLogLine{
{
Index: 1,
Message: ctx.Locale.TrString("actions.runs.expire_log_message"),
// Timestamp doesn't mean anything when the log is expired.
// Set it to the task's updated time since it's probably the time when the log has expired.
Timestamp: float64(task.Updated.AsTime().UnixNano()) / float64(time.Second),
},
},
Started: int64(step.Started),
})
}
continue
}
logLines := make([]*ViewStepLogLine, 0) // marshal to '[]' instead of 'null' in json
index := step.LogIndex + cursor.Cursor
validCursor := cursor.Cursor >= 0 &&
// !(cursor.Cursor < step.LogLength) when the frontend tries to fetch next line before it's ready.
// So return the same cursor and empty lines to let the frontend retry.
cursor.Cursor < step.LogLength &&
// !(index < int64(len(task.LogIndexes))) when task data is older than step data.
// It can be fixed by making sure write/read tasks and steps in the same transaction,
// but it's easier to just treat it as fetching the next line before it's ready.
index < int64(len(task.LogIndexes))
if validCursor {
length := step.LogLength - cursor.Cursor
offset := task.LogIndexes[index]
logRows, err := actions.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
if err != nil {
return nil, nil, fmt.Errorf("actions.ReadLogs: %w", err)
}
for i, row := range logRows {
logLines = append(logLines, &ViewStepLogLine{
Index: cursor.Cursor + int64(i) + 1, // start at 1
Message: row.Content,
Timestamp: float64(row.Time.AsTime().UnixNano()) / float64(time.Second),
})
}
}
logs = append(logs, &ViewStepLog{
Step: cursor.Step,
Cursor: cursor.Cursor + int64(len(logLines)),
Lines: logLines,
Started: int64(step.Started),
})
}
return viewJobs, logs, nil
}
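
A compact sketch of the cursor arithmetic that the validCursor check protects, with simplified stand-in types for the real step and task models (field names follow the code above, everything else is illustrative):

package main

import "fmt"

// step and task are simplified stand-ins for the real step / ActionTask data.
type step struct {
	LogIndex  int64 // index of the step's first line within the task log
	LogLength int64 // number of lines the step has written so far
}

type task struct {
	LogIndexes []int64 // offset of each log line inside the log file
}

// nextRead decides whether a cursor can be served and how many lines to read.
func nextRead(s step, t task, cursor int64) (index, length int64, ok bool) {
	index = s.LogIndex + cursor
	ok = cursor >= 0 &&
		cursor < s.LogLength && // the frontend may poll before the next line exists
		index < int64(len(t.LogIndexes)) // the task row may lag behind the step row
	if !ok {
		return 0, 0, false // serve no lines; the frontend keeps the cursor and retries
	}
	return index, s.LogLength - cursor, true
}

func main() {
	t := task{LogIndexes: []int64{0, 40, 95, 130}}
	s := step{LogIndex: 1, LogLength: 3}
	fmt.Println(nextRead(s, t, 1)) // 2 2 true: two lines remain, starting at log index 2
	fmt.Println(nextRead(s, t, 3)) // 0 0 false: nothing new yet
}
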
// Rerun will rerun jobs in the given run
// If jobIndexStr is a blank string, it means rerun all jobs
func Rerun(ctx *context_module.Context) {
@ -614,11 +623,6 @@ func getRunJobs(ctx *context_module.Context, runIndex, jobIndex int64) (*actions
}
func ArtifactsDeleteView(ctx *context_module.Context) {
if !ctx.Repo.CanWrite(unit.TypeActions) {
ctx.Error(http.StatusForbidden, "no permission")
return
}
runIndex := getRunIndex(ctx)
artifactName := ctx.PathParam("artifact_name")
@ -783,142 +787,28 @@ func Run(ctx *context_module.Context) {
ctx.ServerError("ref", nil)
return
}
// cannot run the workflow when it is disabled
cfgUnit := ctx.Repo.Repository.MustGetUnit(ctx, unit.TypeActions)
cfg := cfgUnit.ActionsConfig()
if cfg.IsWorkflowDisabled(workflowID) {
ctx.Flash.Error(ctx.Tr("actions.workflow.disabled"))
ctx.Redirect(redirectURL)
return
}
// get target commit of run from specified ref
refName := git.RefName(ref)
var runTargetCommit *git.Commit
var err error
if refName.IsTag() {
runTargetCommit, err = ctx.Repo.GitRepo.GetTagCommit(refName.TagName())
} else if refName.IsBranch() {
runTargetCommit, err = ctx.Repo.GitRepo.GetBranchCommit(refName.BranchName())
} else {
ctx.Flash.Error(ctx.Tr("form.git_ref_name_error", ref))
ctx.Redirect(redirectURL)
return
}
if err != nil {
ctx.Flash.Error(ctx.Tr("form.target_ref_not_exist", ref))
ctx.Redirect(redirectURL)
return
}
// get workflow entry from runTargetCommit
entries, err := actions.ListWorkflows(runTargetCommit)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
// find workflow from commit
var workflows []*jobparser.SingleWorkflow
for _, entry := range entries {
if entry.Name() == workflowID {
content, err := actions.GetContentFromEntry(entry)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
workflows, err = jobparser.Parse(content)
if err != nil {
ctx.ServerError("workflow", err)
return
}
break
}
}
if len(workflows) == 0 {
ctx.Flash.Error(ctx.Tr("actions.workflow.not_found", workflowID))
ctx.Redirect(redirectURL)
return
}
// get inputs from post
workflow := &model.Workflow{
RawOn: workflows[0].RawOn,
}
inputs := make(map[string]any)
if workflowDispatch := workflow.WorkflowDispatchConfig(); workflowDispatch != nil {
err := actions_service.DispatchActionWorkflow(ctx, ctx.Doer, ctx.Repo.Repository, ctx.Repo.GitRepo, workflowID, ref, func(workflowDispatch *model.WorkflowDispatch, inputs map[string]any) error {
for name, config := range workflowDispatch.Inputs {
value := ctx.Req.PostFormValue(name)
if config.Type == "boolean" {
// https://www.w3.org/TR/html401/interact/forms.html
// https://stackoverflow.com/questions/11424037/do-checkbox-inputs-only-post-data-if-theyre-checked
// Checkboxes (and radio buttons) are on/off switches that may be toggled by the user.
// A switch is "on" when the control element's checked attribute is set.
// When a form is submitted, only "on" checkbox controls can become successful.
inputs[name] = strconv.FormatBool(value == "on")
inputs[name] = strconv.FormatBool(ctx.FormBool(name))
} else if value != "" {
inputs[name] = value
} else {
inputs[name] = config.Default
}
}
}
// ctx.Req.PostForm -> WorkflowDispatchPayload.Inputs -> ActionRun.EventPayload -> runner: ghc.Event
// https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
// https://docs.github.com/en/webhooks/webhook-events-and-payloads#workflow_dispatch
workflowDispatchPayload := &api.WorkflowDispatchPayload{
Workflow: workflowID,
Ref: ref,
Repository: convert.ToRepo(ctx, ctx.Repo.Repository, access_model.Permission{AccessMode: perm.AccessModeNone}),
Inputs: inputs,
Sender: convert.ToUserWithAccessMode(ctx, ctx.Doer, perm.AccessModeNone),
}
var eventPayload []byte
if eventPayload, err = workflowDispatchPayload.JSONPayload(); err != nil {
ctx.ServerError("JSONPayload", err)
return
}
run := &actions_model.ActionRun{
Title: strings.SplitN(runTargetCommit.CommitMessage, "\n", 2)[0],
RepoID: ctx.Repo.Repository.ID,
OwnerID: ctx.Repo.Repository.OwnerID,
WorkflowID: workflowID,
TriggerUserID: ctx.Doer.ID,
Ref: ref,
CommitSHA: runTargetCommit.ID.String(),
IsForkPullRequest: false,
Event: "workflow_dispatch",
TriggerEvent: "workflow_dispatch",
EventPayload: string(eventPayload),
Status: actions_model.StatusWaiting,
}
// cancel running jobs of the same workflow
if err := actions_model.CancelPreviousJobs(
ctx,
run.RepoID,
run.Ref,
run.WorkflowID,
run.Event,
); err != nil {
log.Error("CancelRunningJobs: %v", err)
}
// Insert the action run and its associated jobs into the database
if err := actions_model.InsertRun(ctx, run, workflows); err != nil {
ctx.ServerError("workflow", err)
return
}
alljobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
return nil
})
if err != nil {
log.Error("FindRunJobs: %v", err)
if errLocale := util.ErrAsLocale(err); errLocale != nil {
ctx.Flash.Error(ctx.Tr(errLocale.TrKey, errLocale.TrArgs...))
ctx.Redirect(redirectURL)
} else {
ctx.ServerError("DispatchActionWorkflow", err)
}
return
}
actions_service.CreateCommitStatus(ctx, alljobs...)
ctx.Flash.Success(ctx.Tr("actions.workflow.run_success", workflowID))
ctx.Redirect(redirectURL)
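
The input-collection rule inside the dispatch callback is worth seeing in isolation: checkboxes post nothing unless they are checked, and every other input falls back to the workflow's declared default. A hedged sketch with simplified types standing in for the act workflow model:

package main

import (
	"fmt"
	"strconv"
)

// inputConfig is a simplified stand-in for a workflow_dispatch input definition.
type inputConfig struct {
	Type    string
	Default string
}

// collectInputs mirrors the branch logic of the dispatch callback above.
func collectInputs(posted map[string]string, declared map[string]inputConfig) map[string]any {
	inputs := make(map[string]any)
	for name, cfg := range declared {
		value := posted[name]
		if cfg.Type == "boolean" {
			// An unchecked checkbox is simply absent from the form post,
			// so the presence of "on" is the whole signal.
			inputs[name] = strconv.FormatBool(value == "on")
		} else if value != "" {
			inputs[name] = value
		} else {
			inputs[name] = cfg.Default
		}
	}
	return inputs
}

func main() {
	declared := map[string]inputConfig{
		"dry-run":   {Type: "boolean"},
		"log-level": {Type: "choice", Default: "info"},
	}
	posted := map[string]string{"dry-run": "on"} // log-level not submitted
	fmt.Println(collectInputs(posted, declared)) // map[dry-run:true log-level:info]
}
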

View File

@ -22,7 +22,6 @@ import (
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/charset"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitgraph"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
@ -32,6 +31,7 @@ import (
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/gitdiff"
repo_service "code.gitea.io/gitea/services/repository"
"code.gitea.io/gitea/services/repository/gitgraph"
)
const (

View File

@ -6,13 +6,10 @@ package repo
import (
"net/http"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/unit"
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/optional"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/services/context"
issue_service "code.gitea.io/gitea/services/issue"
)
// IssueSuggestions returns a list of issue suggestions
@ -29,54 +26,11 @@ func IssueSuggestions(ctx *context.Context) {
isPull = optional.Some(false)
}
searchOpt := &issue_indexer.SearchOptions{
Paginator: &db.ListOptions{
Page: 0,
PageSize: 5,
},
Keyword: keyword,
RepoIDs: []int64{ctx.Repo.Repository.ID},
IsPull: isPull,
IsClosed: nil,
SortBy: issue_indexer.SortByUpdatedDesc,
}
ids, _, err := issue_indexer.SearchIssues(ctx, searchOpt)
suggestions, err := issue_service.GetSuggestion(ctx, ctx.Repo.Repository, isPull, keyword)
if err != nil {
ctx.ServerError("SearchIssues", err)
ctx.ServerError("GetSuggestion", err)
return
}
issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
if err != nil {
ctx.ServerError("FindIssuesByIDs", err)
return
}
suggestions := make([]*structs.Issue, 0, len(issues))
for _, issue := range issues {
suggestion := &structs.Issue{
ID: issue.ID,
Index: issue.Index,
Title: issue.Title,
State: issue.State(),
}
if issue.IsPull {
if err := issue.LoadPullRequest(ctx); err != nil {
ctx.ServerError("LoadPullRequest", err)
return
}
if issue.PullRequest != nil {
suggestion.PullRequest = &structs.PullRequestMeta{
HasMerged: issue.PullRequest.HasMerged,
IsWorkInProgress: issue.PullRequest.IsWorkInProgress(ctx),
}
}
}
suggestions = append(suggestions, suggestion)
}
ctx.JSON(http.StatusOK, suggestions)
}
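
For context, ctx.JSON serializes a slice of structs.Issue values. A trimmed-down, purely illustrative sketch of the shape the frontend consumes (field set abbreviated, JSON tags assumed rather than copied from structs.Issue):

package main

import (
	"encoding/json"
	"fmt"
)

// suggestion is an illustrative subset of the fields the endpoint exposes.
type suggestion struct {
	ID    int64  `json:"id"`
	Index int64  `json:"number"`
	Title string `json:"title"`
	State string `json:"state"`
}

func main() {
	out, _ := json.MarshalIndent([]suggestion{
		{ID: 42, Index: 7, Title: "Fix migration bug", State: "open"},
	}, "", "  ")
	fmt.Println(string(out))
}
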

View File

@ -4,7 +4,6 @@
package repo
import (
stdCtx "context"
"fmt"
"math/big"
"net/http"
@ -40,86 +39,80 @@ import (
)
// roleDescriptor returns the role descriptor for a comment in/with the given repo, poster and issue
func roleDescriptor(ctx stdCtx.Context, repo *repo_model.Repository, poster *user_model.User, permsCache map[int64]access_model.Permission, issue *issues_model.Issue, hasOriginalAuthor bool) (issues_model.RoleDescriptor, error) {
roleDescriptor := issues_model.RoleDescriptor{}
func roleDescriptor(ctx *context.Context, repo *repo_model.Repository, poster *user_model.User, permsCache map[int64]access_model.Permission, issue *issues_model.Issue, hasOriginalAuthor bool) (roleDesc issues_model.RoleDescriptor, err error) {
if hasOriginalAuthor {
return roleDescriptor, nil
// the poster is a migrated user, so no need to detect the role
return roleDesc, nil
}
var perm access_model.Permission
var err error
if permsCache != nil {
var ok bool
perm, ok = permsCache[poster.ID]
if !ok {
perm, err = access_model.GetUserRepoPermission(ctx, repo, poster)
if err != nil {
return roleDescriptor, err
}
}
permsCache[poster.ID] = perm
} else {
if poster.IsGhost() || !poster.IsIndividual() {
return roleDesc, nil
}
roleDesc.IsPoster = issue.IsPoster(poster.ID) // check whether the comment's poster is the issue's poster
// Guess the role of the poster in the repo by permission
perm, hasPermCache := permsCache[poster.ID]
if !hasPermCache {
perm, err = access_model.GetUserRepoPermission(ctx, repo, poster)
if err != nil {
return roleDescriptor, err
return roleDesc, err
}
}
// If the poster is the actual poster of the issue, enable Poster role.
roleDescriptor.IsPoster = issue.IsPoster(poster.ID)
if permsCache != nil {
permsCache[poster.ID] = perm
}
// Check if the poster is owner of the repo.
if perm.IsOwner() {
// If the poster isn't an admin, enable the owner role.
// If the poster isn't a site admin, then they must be the repo's owner
if !poster.IsAdmin {
roleDescriptor.RoleInRepo = issues_model.RoleRepoOwner
return roleDescriptor, nil
roleDesc.RoleInRepo = issues_model.RoleRepoOwner
return roleDesc, nil
}
// Otherwise check if poster is the real repo admin.
ok, err := access_model.IsUserRealRepoAdmin(ctx, repo, poster)
// Otherwise (poster is site admin), check if poster is the real repo admin.
isRealRepoAdmin, err := access_model.IsUserRealRepoAdmin(ctx, repo, poster)
if err != nil {
return roleDescriptor, err
return roleDesc, err
}
if ok {
roleDescriptor.RoleInRepo = issues_model.RoleRepoOwner
return roleDescriptor, nil
if isRealRepoAdmin {
roleDesc.RoleInRepo = issues_model.RoleRepoOwner
return roleDesc, nil
}
}
// If repo is organization, check Member role
if err := repo.LoadOwner(ctx); err != nil {
return roleDescriptor, err
if err = repo.LoadOwner(ctx); err != nil {
return roleDesc, err
}
if repo.Owner.IsOrganization() {
if isMember, err := organization.IsOrganizationMember(ctx, repo.Owner.ID, poster.ID); err != nil {
return roleDescriptor, err
return roleDesc, err
} else if isMember {
roleDescriptor.RoleInRepo = issues_model.RoleRepoMember
return roleDescriptor, nil
roleDesc.RoleInRepo = issues_model.RoleRepoMember
return roleDesc, nil
}
}
// If the poster is the collaborator of the repo
if isCollaborator, err := repo_model.IsCollaborator(ctx, repo.ID, poster.ID); err != nil {
return roleDescriptor, err
return roleDesc, err
} else if isCollaborator {
roleDescriptor.RoleInRepo = issues_model.RoleRepoCollaborator
return roleDescriptor, nil
roleDesc.RoleInRepo = issues_model.RoleRepoCollaborator
return roleDesc, nil
}
hasMergedPR, err := issues_model.HasMergedPullRequestInRepo(ctx, repo.ID, poster.ID)
if err != nil {
return roleDescriptor, err
return roleDesc, err
} else if hasMergedPR {
roleDescriptor.RoleInRepo = issues_model.RoleRepoContributor
roleDesc.RoleInRepo = issues_model.RoleRepoContributor
} else if issue.IsPull {
// only display first time contributor in the first opening pull request
roleDescriptor.RoleInRepo = issues_model.RoleRepoFirstTimeContributor
roleDesc.RoleInRepo = issues_model.RoleRepoFirstTimeContributor
}
return roleDescriptor, nil
return roleDesc, nil
}
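
The resolution above is a strict precedence chain. A minimal sketch of that ordering, with booleans standing in for the real permission and membership lookups:

package main

import "fmt"

// resolveRole condenses the precedence used by roleDescriptor:
// owner > org member > collaborator > contributor > first-time contributor.
func resolveRole(isOwner, isOrgMember, isCollaborator, hasMergedPR, isPull bool) string {
	switch {
	case isOwner:
		return "owner"
	case isOrgMember:
		return "member"
	case isCollaborator:
		return "collaborator"
	case hasMergedPR:
		return "contributor"
	case isPull:
		return "first-time contributor"
	default:
		return ""
	}
}

func main() {
	fmt.Println(resolveRole(false, true, true, false, false))  // member: org membership wins over collaborator
	fmt.Println(resolveRole(false, false, false, false, true)) // first-time contributor (first opened pull request)
}
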
func getBranchData(ctx *context.Context, issue *issues_model.Issue) {

View File

@ -304,31 +304,6 @@ func CreatePost(ctx *context.Context) {
handleCreateError(ctx, ctxUser, err, "CreatePost", tplCreate, &form)
}
const (
tplWatchUnwatch templates.TplName = "repo/watch_unwatch"
tplStarUnstar templates.TplName = "repo/star_unstar"
)
func acceptTransfer(ctx *context.Context) {
err := repo_service.AcceptTransferOwnership(ctx, ctx.Repo.Repository, ctx.Doer)
if err == nil {
ctx.Flash.Success(ctx.Tr("repo.settings.transfer.success"))
ctx.Redirect(ctx.Repo.Repository.Link())
return
}
handleActionError(ctx, err)
}
func rejectTransfer(ctx *context.Context) {
err := repo_service.RejectRepositoryTransfer(ctx, ctx.Repo.Repository, ctx.Doer)
if err == nil {
ctx.Flash.Success(ctx.Tr("repo.settings.transfer.rejected"))
ctx.Redirect(ctx.Repo.Repository.Link())
return
}
handleActionError(ctx, err)
}
func handleActionError(ctx *context.Context, err error) {
if errors.Is(err, user_model.ErrBlockedUser) {
ctx.Flash.Error(ctx.Tr("repo.action.blocked_user"))
@ -339,72 +314,6 @@ func handleActionError(ctx *context.Context, err error) {
}
}
// Action response for actions to a repository
func Action(ctx *context.Context) {
var err error
switch ctx.PathParam("action") {
case "watch":
err = repo_model.WatchRepo(ctx, ctx.Doer, ctx.Repo.Repository, true)
case "unwatch":
err = repo_model.WatchRepo(ctx, ctx.Doer, ctx.Repo.Repository, false)
case "star":
err = repo_model.StarRepo(ctx, ctx.Doer, ctx.Repo.Repository, true)
case "unstar":
err = repo_model.StarRepo(ctx, ctx.Doer, ctx.Repo.Repository, false)
case "accept_transfer":
acceptTransfer(ctx)
return
case "reject_transfer":
rejectTransfer(ctx)
return
case "desc": // FIXME: this is not used
if !ctx.Repo.IsOwner() {
ctx.Error(http.StatusNotFound)
return
}
ctx.Repo.Repository.Description = ctx.FormString("desc")
ctx.Repo.Repository.Website = ctx.FormString("site")
err = repo_service.UpdateRepository(ctx, ctx.Repo.Repository, false)
}
if err != nil {
handleActionError(ctx, err)
return
}
switch ctx.PathParam("action") {
case "watch", "unwatch":
ctx.Data["IsWatchingRepo"] = repo_model.IsWatching(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID)
case "star", "unstar":
ctx.Data["IsStaringRepo"] = repo_model.IsStaring(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID)
}
// see the `hx-trigger="refreshUserCards ..."` comments in tmpl
ctx.RespHeader().Add("hx-trigger", "refreshUserCards")
switch ctx.PathParam("action") {
case "watch", "unwatch", "star", "unstar":
// we have to reload the repository because NumStars or NumWatching (used in the templates) has just changed
ctx.Data["Repository"], err = repo_model.GetRepositoryByName(ctx, ctx.Repo.Repository.OwnerID, ctx.Repo.Repository.Name)
if err != nil {
ctx.ServerError(fmt.Sprintf("Action (%s)", ctx.PathParam("action")), err)
return
}
}
switch ctx.PathParam("action") {
case "watch", "unwatch":
ctx.HTML(http.StatusOK, tplWatchUnwatch)
return
case "star", "unstar":
ctx.HTML(http.StatusOK, tplStarUnstar)
return
}
ctx.RedirectToCurrentSite(ctx.FormString("redirect_to"), ctx.Repo.RepoLink)
}
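
The removed handler's core is a mapping from the path action to a model call that takes an on/off flag. A stripped-down sketch (the toggle type is a placeholder, not the real repo_model signature):

package main

import (
	"errors"
	"fmt"
)

// toggle stands in for repo_model.WatchRepo / StarRepo, which both take a boolean switch.
type toggle func(on bool) error

// dispatch mirrors the watch/unwatch/star/unstar branches of the removed handler.
func dispatch(action string, watch, star toggle) error {
	switch action {
	case "watch":
		return watch(true)
	case "unwatch":
		return watch(false)
	case "star":
		return star(true)
	case "unstar":
		return star(false)
	default:
		return errors.New("unknown action: " + action)
	}
}

func main() {
	watch := func(on bool) error { fmt.Println("watch =", on); return nil }
	star := func(on bool) error { fmt.Println("star =", on); return nil }
	_ = dispatch("unstar", watch, star) // prints "star = false"
}
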
// RedirectDownload returns a file based on the following info:
func RedirectDownload(ctx *context.Context) {
var (

View File

@ -1,140 +0,0 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"errors"
"net/http"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/templates"
shared "code.gitea.io/gitea/routers/web/shared/actions"
shared_user "code.gitea.io/gitea/routers/web/shared/user"
"code.gitea.io/gitea/services/context"
)
const (
tplRepoVariables templates.TplName = "repo/settings/actions"
tplOrgVariables templates.TplName = "org/settings/actions"
tplUserVariables templates.TplName = "user/settings/actions"
tplAdminVariables templates.TplName = "admin/actions"
)
type variablesCtx struct {
OwnerID int64
RepoID int64
IsRepo bool
IsOrg bool
IsUser bool
IsGlobal bool
VariablesTemplate templates.TplName
RedirectLink string
}
func getVariablesCtx(ctx *context.Context) (*variablesCtx, error) {
if ctx.Data["PageIsRepoSettings"] == true {
return &variablesCtx{
OwnerID: 0,
RepoID: ctx.Repo.Repository.ID,
IsRepo: true,
VariablesTemplate: tplRepoVariables,
RedirectLink: ctx.Repo.RepoLink + "/settings/actions/variables",
}, nil
}
if ctx.Data["PageIsOrgSettings"] == true {
err := shared_user.LoadHeaderCount(ctx)
if err != nil {
ctx.ServerError("LoadHeaderCount", err)
return nil, nil
}
return &variablesCtx{
OwnerID: ctx.ContextUser.ID,
RepoID: 0,
IsOrg: true,
VariablesTemplate: tplOrgVariables,
RedirectLink: ctx.Org.OrgLink + "/settings/actions/variables",
}, nil
}
if ctx.Data["PageIsUserSettings"] == true {
return &variablesCtx{
OwnerID: ctx.Doer.ID,
RepoID: 0,
IsUser: true,
VariablesTemplate: tplUserVariables,
RedirectLink: setting.AppSubURL + "/user/settings/actions/variables",
}, nil
}
if ctx.Data["PageIsAdmin"] == true {
return &variablesCtx{
OwnerID: 0,
RepoID: 0,
IsGlobal: true,
VariablesTemplate: tplAdminVariables,
RedirectLink: setting.AppSubURL + "/-/admin/actions/variables",
}, nil
}
return nil, errors.New("unable to set Variables context")
}
func Variables(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("actions.variables")
ctx.Data["PageType"] = "variables"
ctx.Data["PageIsSharedSettingsVariables"] = true
vCtx, err := getVariablesCtx(ctx)
if err != nil {
ctx.ServerError("getVariablesCtx", err)
return
}
shared.SetVariablesContext(ctx, vCtx.OwnerID, vCtx.RepoID)
if ctx.Written() {
return
}
ctx.HTML(http.StatusOK, vCtx.VariablesTemplate)
}
func VariableCreate(ctx *context.Context) {
vCtx, err := getVariablesCtx(ctx)
if err != nil {
ctx.ServerError("getVariablesCtx", err)
return
}
if ctx.HasError() { // form binding validation error
ctx.JSONError(ctx.GetErrMsg())
return
}
shared.CreateVariable(ctx, vCtx.OwnerID, vCtx.RepoID, vCtx.RedirectLink)
}
func VariableUpdate(ctx *context.Context) {
vCtx, err := getVariablesCtx(ctx)
if err != nil {
ctx.ServerError("getVariablesCtx", err)
return
}
if ctx.HasError() { // form binding validation error
ctx.JSONError(ctx.GetErrMsg())
return
}
shared.UpdateVariable(ctx, vCtx.RedirectLink)
}
func VariableDelete(ctx *context.Context) {
vCtx, err := getVariablesCtx(ctx)
if err != nil {
ctx.ServerError("getVariablesCtx", err)
return
}
shared.DeleteVariable(ctx, vCtx.RedirectLink)
}
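
The central idea of this (now removed) file is a single set of variable handlers parameterized by scope. A condensed, illustrative sketch of the scope-resolution pattern; the page flags are copied from the code above, everything else is simplified:

package main

import (
	"errors"
	"fmt"
)

// scope is a simplified variablesCtx: the owner/repo IDs select the scope, both zero means global.
type scope struct {
	OwnerID, RepoID int64
	Redirect        string
}

// resolveScope mirrors getVariablesCtx: the page flags decide which scope the shared handlers operate on.
func resolveScope(flags map[string]bool, repoID, orgID, doerID int64) (scope, error) {
	switch {
	case flags["PageIsRepoSettings"]:
		return scope{RepoID: repoID, Redirect: "/settings/actions/variables"}, nil
	case flags["PageIsOrgSettings"]:
		return scope{OwnerID: orgID, Redirect: "/org/settings/actions/variables"}, nil
	case flags["PageIsUserSettings"]:
		return scope{OwnerID: doerID, Redirect: "/user/settings/actions/variables"}, nil
	case flags["PageIsAdmin"]:
		return scope{Redirect: "/-/admin/actions/variables"}, nil
	}
	return scope{}, errors.New("unable to set Variables context")
}

func main() {
	s, err := resolveScope(map[string]bool{"PageIsOrgSettings": true}, 0, 12, 7)
	fmt.Println(s, err) // {12 0 /org/settings/actions/variables} <nil>
}
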

Some files were not shown because too many files have changed in this diff