// Copyright 2017 Gitea. All rights reserved.
// SPDX-License-Identifier: MIT

package git

import (
	"context"
	"crypto/sha1"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"

	asymkey_model "code.gitea.io/gitea/models/asymkey"
	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	api "code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/translation"

	"xorm.io/builder"
	"xorm.io/xorm"
)

// CommitStatus holds a single Status of a single Commit
type CommitStatus struct {
	ID          int64                  `xorm:"pk autoincr"`
	Index       int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"`
	RepoID      int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"`
	Repo        *repo_model.Repository `xorm:"-"`
	State       api.CommitStatusState  `xorm:"VARCHAR(7) NOT NULL"`
	SHA         string                 `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
	TargetURL   string                 `xorm:"TEXT"`
	Description string                 `xorm:"TEXT"`
	ContextHash string                 `xorm:"VARCHAR(64) index"`
	Context     string                 `xorm:"TEXT"`
	Creator     *user_model.User       `xorm:"-"`
	CreatorID   int64

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

func init() {
	db.RegisterModel(new(CommitStatus))
	db.RegisterModel(new(CommitStatusIndex))
}
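
// postgresGetCommitStatusIndex allocates the next per-(repo_id, sha) status index on PostgreSQL:
// a single "INSERT ... ON CONFLICT ... DO UPDATE ... RETURNING max_index" upserts the counter row
// and returns the incremented value in one atomic statement.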
func postgresGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	res, err := db.GetEngine(ctx).Query("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON CONFLICT (repo_id, sha) DO UPDATE SET max_index = `commit_status_index`.max_index+1 RETURNING max_index",
		repoID, sha)
	if err != nil {
		return 0, err
	}
	if len(res) == 0 {
		return 0, db.ErrGetResourceIndexFailed
	}
	return strconv.ParseInt(string(res[0]["max_index"]), 10, 64)
}
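
// mysqlGetCommitStatusIndex allocates the next per-(repo_id, sha) status index on MySQL:
// "INSERT ... ON DUPLICATE KEY UPDATE" bumps the counter, then a follow-up SELECT reads the
// current max_index back.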
func mysqlGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	if _, err := db.GetEngine(ctx).Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
		repoID, sha); err != nil {
		return 0, err
	}

	var idx int64
	_, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?",
		repoID, sha).Get(&idx)
	if err != nil {
		return 0, err
	}
	if idx == 0 {
		return 0, errors.New("cannot get the correct index")
	}
	return idx, nil
}

// GetNextCommitStatusIndex generates the next resource index for the given repository and commit SHA
func GetNextCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	_, err := git.NewIDFromString(sha)
	if err != nil {
		return 0, git.ErrInvalidSHA{SHA: sha}
	}

	switch {
	case setting.Database.Type.IsPostgreSQL():
		return postgresGetCommitStatusIndex(ctx, repoID, sha)
	case setting.Database.Type.IsMySQL():
		return mysqlGetCommitStatusIndex(ctx, repoID, sha)
	}
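
	// For the other database types, fall back to bumping the counter with plain UPDATE/INSERT statements.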
	e := db.GetEngine(ctx)

	// try to update the max_index to the next value, and acquire the write-lock for the record
	res, err := e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
	if err != nil {
		return 0, fmt.Errorf("update failed: %w", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}
	if affected == 0 {
		// this slow path is only taken the first time a resource index is created
		_, errIns := e.Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) VALUES (?, ?, 0)", repoID, sha)
		res, err = e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
		if err != nil {
			return 0, fmt.Errorf("update2 failed: %w", err)
		}
		affected, err = res.RowsAffected()
		if err != nil {
			return 0, fmt.Errorf("RowsAffected failed: %w", err)
		}
		// if the update still cannot touch any record, the record must not exist and the insert must have failed
		if affected == 0 {
			if errIns == nil {
				return 0, errors.New("impossible error when GetNextCommitStatusIndex, insert and update both succeeded but no record is updated")
			}
			return 0, fmt.Errorf("insert failed: %w", errIns)
		}
	}

	// now, the new index is in the database (protected by the transaction and write-lock)
	var newIdx int64
	has, err := e.SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id=? AND sha=?", repoID, sha).Get(&newIdx)
	if err != nil {
		return 0, fmt.Errorf("select failed: %w", err)
	}
	if !has {
		return 0, errors.New("impossible error when GetNextCommitStatusIndex, upsert succeeded but no record can be selected")
	}
	return newIdx, nil
}

func (status *CommitStatus) loadRepository(ctx context.Context) (err error) {
	if status.Repo == nil {
		status.Repo, err = repo_model.GetRepositoryByID(ctx, status.RepoID)
		if err != nil {
			return fmt.Errorf("getRepositoryByID [%d]: %w", status.RepoID, err)
		}
	}
	return nil
}

func (status *CommitStatus) loadCreator(ctx context.Context) (err error) {
	if status.Creator == nil && status.CreatorID > 0 {
		status.Creator, err = user_model.GetUserByID(ctx, status.CreatorID)
		if err != nil {
			return fmt.Errorf("getUserByID [%d]: %w", status.CreatorID, err)
		}
	}
	return nil
}

func (status *CommitStatus) loadAttributes(ctx context.Context) (err error) {
	if err := status.loadRepository(ctx); err != nil {
		return err
	}
	return status.loadCreator(ctx)
}

// APIURL returns the absolute API URL of this commit status.
func (status *CommitStatus) APIURL(ctx context.Context) string {
	_ = status.loadAttributes(ctx)
	return status.Repo.APIURL() + "/statuses/" + url.PathEscape(status.SHA)
}

// LocaleString returns the locale string name of the Status
func (status *CommitStatus) LocaleString(lang translation.Locale) string {
	return lang.TrString("repo.commitstatus." + status.State.String())
}

// HideActionsURL sets `TargetURL` to an empty string if the status comes from Gitea Actions
func (status *CommitStatus) HideActionsURL(ctx context.Context) {
	if status.RepoID == 0 {
		return
	}

	if status.Repo == nil {
		if err := status.loadRepository(ctx); err != nil {
			log.Error("loadRepository: %v", err)
			return
		}
	}

	prefix := fmt.Sprintf("%s/actions", status.Repo.Link())
	if strings.HasPrefix(status.TargetURL, prefix) {
		status.TargetURL = ""
	}
}

// CalcCommitStatus returns the overall commit status derived from the given statuses;
// the statuses should be ordered by id desc
func CalcCommitStatus(statuses []*CommitStatus) *CommitStatus {
	if len(statuses) == 0 {
		return nil
	}

	latestWorstStatus := statuses[0]
	for _, status := range statuses[1:] {
		if status.State.NoBetterThan(latestWorstStatus.State) {
			latestWorstStatus = status
		}
	}
	return latestWorstStatus
}

// CommitStatusOptions holds the options for querying commit statuses
type CommitStatusOptions struct {
	db.ListOptions
	RepoID   int64
	SHA      string
	State    string
	SortType string
}
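
// ToConds builds the query conditions from the options: it always filters by repo_id and sha,
// and additionally by state when a recognized state value is requested.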
func (opts *CommitStatusOptions) ToConds() builder.Cond {
	var cond builder.Cond = builder.Eq{
		"repo_id": opts.RepoID,
		"sha":     opts.SHA,
	}

	switch opts.State {
	case "pending", "success", "error", "failure", "warning":
		cond = cond.And(builder.Eq{
			"state": opts.State,
		})
	}

	return cond
}
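
// ToOrders returns the ORDER BY clause for the requested sort type, defaulting to newest-first by creation time.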
func (opts *CommitStatusOptions) ToOrders() string {
	switch opts.SortType {
	case "oldest":
		return "created_unix ASC"
	case "recentupdate":
		return "updated_unix DESC"
	case "leastupdate":
		return "updated_unix ASC"
	case "leastindex":
		return "`index` DESC"
	case "highestindex":
		return "`index` ASC"
	default:
		return "created_unix DESC"
	}
}

// CommitStatusIndex represents a table for commit status index
type CommitStatusIndex struct {
	ID       int64
	RepoID   int64  `xorm:"unique(repo_sha)"`
	SHA      string `xorm:"unique(repo_sha)"`
	MaxIndex int64  `xorm:"index"`
}

// GetLatestCommitStatus returns all statuses with a unique context for a given commit.
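// It first selects the highest `index` per context_hash (the latest status of each context),
// then loads the matching status rows by those indexes.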
func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOptions db.ListOptions) ([]*CommitStatus, int64, error) {
	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{}).
			Where("repo_id = ?", repoID).And("sha = ?", sha)
	}
	indices := make([]int64, 0, 10)
	sess := getBase().Select("max( `index` ) as `index`").
		GroupBy("context_hash").OrderBy("max( `index` ) desc")
	if !listOptions.IsListAll() {
		sess = db.SetSessionPagination(sess, &listOptions)
	}
	count, err := sess.FindAndCount(&indices)
	if err != nil {
		return nil, count, err
	}
	statuses := make([]*CommitStatus, 0, len(indices))
	if len(indices) == 0 {
		return statuses, count, nil
	}
	return statuses, count, getBase().And(builder.In("`index`", indices)).Find(&statuses)
}

// GetLatestCommitStatusForPairs returns all statuses with a unique context for a given list of repo-sha pairs
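// It follows the same two-step scheme as GetLatestCommitStatus, grouping by context_hash, repo_id and sha.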
func GetLatestCommitStatusForPairs(ctx context.Context, repoSHAs []RepoSHA) (map[int64][]*CommitStatus, error) {
	type result struct {
		Index  int64
		RepoID int64
		SHA    string
	}

	results := make([]result, 0, len(repoSHAs))

	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{})
	}

	// Create a disjunction of conditions for each repoID and SHA pair
	conds := make([]builder.Cond, 0, len(repoSHAs))
	for _, repoSHA := range repoSHAs {
		conds = append(conds, builder.Eq{"repo_id": repoSHA.RepoID, "sha": repoSHA.SHA})
	}
	sess := getBase().Where(builder.Or(conds...)).
		Select("max( `index` ) as `index`, repo_id, sha").
		GroupBy("context_hash, repo_id, sha").OrderBy("max( `index` ) desc")

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	repoStatuses := make(map[int64][]*CommitStatus)

	if len(results) > 0 {
		statuses := make([]*CommitStatus, 0, len(results))

		conds = make([]builder.Cond, 0, len(results))
		for _, result := range results {
			cond := builder.Eq{
				"`index`": result.Index,
				"repo_id": result.RepoID,
				"sha":     result.SHA,
			}
			conds = append(conds, cond)
		}
		err = getBase().Where(builder.Or(conds...)).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by repo ID
		for _, status := range statuses {
			repoStatuses[status.RepoID] = append(repoStatuses[status.RepoID], status)
		}
	}

	return repoStatuses, nil
}

// GetLatestCommitStatusForRepoCommitIDs returns all statuses with a unique context for a given repository and list of commit IDs
func GetLatestCommitStatusForRepoCommitIDs(ctx context.Context, repoID int64, commitIDs []string) (map[string][]*CommitStatus, error) {
	type result struct {
		Index int64
		SHA   string
	}

	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{}).Where("repo_id = ?", repoID)
	}
	results := make([]result, 0, len(commitIDs))

	conds := make([]builder.Cond, 0, len(commitIDs))
	for _, sha := range commitIDs {
		conds = append(conds, builder.Eq{"sha": sha})
	}
	sess := getBase().And(builder.Or(conds...)).
		Select("max( `index` ) as `index`, sha").
		GroupBy("context_hash, sha").OrderBy("max( `index` ) desc")

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	repoStatuses := make(map[string][]*CommitStatus)

	if len(results) > 0 {
		statuses := make([]*CommitStatus, 0, len(results))

		conds = make([]builder.Cond, 0, len(results))
		for _, result := range results {
			conds = append(conds, builder.Eq{"`index`": result.Index, "sha": result.SHA})
		}
		err = getBase().And(builder.Or(conds...)).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by commit
		for _, status := range statuses {
			repoStatuses[status.SHA] = append(repoStatuses[status.SHA], status)
		}
	}

	return repoStatuses, nil
}

// FindRepoRecentCommitStatusContexts returns repository's recent commit status contexts
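// Only contexts whose statuses were updated within the last `before` duration are returned.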
func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
	start := timeutil.TimeStampNow().AddDuration(-before)

	var contexts []string
	if err := db.GetEngine(ctx).Table("commit_status").
		Where("repo_id = ?", repoID).And("updated_unix >= ?", start).
		Cols("context").Distinct().Find(&contexts); err != nil {
		return nil, err
	}

	return contexts, nil
}

// NewCommitStatusOptions holds options for creating a CommitStatus
type NewCommitStatusOptions struct {
	Repo         *repo_model.Repository
	Creator      *user_model.User
	SHA          git.ObjectID
	CommitStatus *CommitStatus
}

// NewCommitStatus saves a commit status into the database
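// The write happens inside a transaction: the per-(repo, sha) index is allocated first,
// then the status row is inserted together with its context hash.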
func NewCommitStatus(ctx context.Context, opts NewCommitStatusOptions) error {
	if opts.Repo == nil {
		return fmt.Errorf("NewCommitStatus[nil, %s]: no repository specified", opts.SHA)
	}

	repoPath := opts.Repo.RepoPath()
	if opts.Creator == nil {
		return fmt.Errorf("NewCommitStatus[%s, %s]: no user specified", repoPath, opts.SHA)
	}

	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return fmt.Errorf("NewCommitStatus[repo_id: %d, user_id: %d, sha: %s]: %w", opts.Repo.ID, opts.Creator.ID, opts.SHA, err)
	}
	defer committer.Close()

	// Get the next Status Index
	idx, err := GetNextCommitStatusIndex(ctx, opts.Repo.ID, opts.SHA.String())
	if err != nil {
		return fmt.Errorf("generate commit status index failed: %w", err)
	}

	opts.CommitStatus.Description = strings.TrimSpace(opts.CommitStatus.Description)
	opts.CommitStatus.Context = strings.TrimSpace(opts.CommitStatus.Context)
	opts.CommitStatus.TargetURL = strings.TrimSpace(opts.CommitStatus.TargetURL)
	opts.CommitStatus.SHA = opts.SHA.String()
	opts.CommitStatus.CreatorID = opts.Creator.ID
	opts.CommitStatus.RepoID = opts.Repo.ID
	opts.CommitStatus.Index = idx
	log.Debug("NewCommitStatus[%s, %s]: %d", repoPath, opts.SHA, opts.CommitStatus.Index)

	opts.CommitStatus.ContextHash = hashCommitStatusContext(opts.CommitStatus.Context)

	// Insert new CommitStatus
	if _, err = db.GetEngine(ctx).Insert(opts.CommitStatus); err != nil {
		return fmt.Errorf("insert CommitStatus[%s, %s]: %w", repoPath, opts.SHA, err)
	}

	return committer.Commit()
}

// SignCommitWithStatuses represents a commit with validation of signature and status state.
type SignCommitWithStatuses struct {
	Status   *CommitStatus
	Statuses []*CommitStatus
	*asymkey_model.SignCommit
}

// ParseCommitsWithStatus checks each commit's latest statuses and calculates its worst status state
func ParseCommitsWithStatus(ctx context.Context, oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
	newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))

	for _, c := range oldCommits {
		commit := &SignCommitWithStatuses{
			SignCommit: c,
		}
		statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, commit.ID.String(), db.ListOptions{})
		if err != nil {
			log.Error("GetLatestCommitStatus: %v", err)
		} else {
			commit.Statuses = statuses
			commit.Status = CalcCommitStatus(statuses)
		}

		newCommits = append(newCommits, commit)
	}
	return newCommits
}

// hashCommitStatusContext returns the SHA-1 hex digest of the given context string
func hashCommitStatusContext(context string) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
}

// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
func ConvertFromGitCommit(ctx context.Context, commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
	return ParseCommitsWithStatus(ctx,
		asymkey_model.ParseCommitsWithSignature(
			ctx,
			user_model.ValidateCommitsWithEmails(ctx, commits),
			repo.GetTrustModel(),
			func(user *user_model.User) (bool, error) {
				return repo_model.IsOwnerMemberCollaborator(ctx, repo, user.ID)
			},
		),
		repo,
	)
}
|
2024-07-28 17:11:40 +02:00
|
|
|
|
|
|
|
// CommitStatusesHideActionsURL hide Gitea Actions urls
|
|
|
|
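// Repositories already loaded for earlier statuses in the slice are reused via a small per-call map.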
func CommitStatusesHideActionsURL(ctx context.Context, statuses []*CommitStatus) {
	idToRepos := make(map[int64]*repo_model.Repository)
	for _, status := range statuses {
		if status == nil {
			continue
		}

		if status.Repo == nil {
			status.Repo = idToRepos[status.RepoID]
		}
		status.HideActionsURL(ctx)
		idToRepos[status.RepoID] = status.Repo
	}
}