mirror of https://codeberg.org/forgejo/forgejo.git
synced 2024-12-01 13:44:06 +01:00
Merge pull request '[gitea] week 2024-33 cherry pick (gitea/main -> forgejo)' (#4924) from earl-warren/wcp/2024-33 into forgejo
Some checks failed
/ release (push) Waiting to run
testing / test-remote-cacher (map[image:registry.redict.io/redict:7.3.0-scratch port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:redis:7.2 port:6379]) (push) Blocked by required conditions
testing / backend-checks (push) Waiting to run
testing / frontend-checks (push) Waiting to run
testing / test-unit (push) Blocked by required conditions
testing / test-remote-cacher (map[image:docker.io/valkey/valkey:7.2.5-alpine3.19 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:ghcr.io/microsoft/garnet-alpine:1.0.14 port:6379]) (push) Blocked by required conditions
testing / test-mysql (push) Blocked by required conditions
testing / test-pgsql (push) Blocked by required conditions
testing / test-sqlite (push) Blocked by required conditions
testing / security-check (push) Blocked by required conditions
Integration tests for the release process / release-simulation (push) Has been cancelled
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/4924
Reviewed-by: Michael Kriese <michael.kriese@gmx.de>
This commit is contained in: commit 5a66691607
24 changed files with 813 additions and 26 deletions
@@ -260,6 +260,11 @@ code.gitea.io/gitea/modules/web
 code.gitea.io/gitea/modules/web/middleware
 DeleteLocaleCookie
 
+code.gitea.io/gitea/modules/zstd
+NewWriter
+Writer.Write
+Writer.Close
+
 code.gitea.io/gitea/routers/web
 NotFound

assets/go-licenses.json (generated, 10 lines changed)
File diff suppressed because one or more lines are too long
@@ -2712,6 +2712,12 @@ LEVEL = Info
 ;DEFAULT_ACTIONS_URL = https://code.forgejo.org
 ;; Logs retention time in days. Old logs will be deleted after this period.
 ;LOG_RETENTION_DAYS = 365
+;; Log compression type, `none` for no compression, `zstd` for zstd compression.
+;; Other compression types like `gzip` are NOT supported, since seekable stream is required for log view.
+;; It's always recommended to use compression when using local disk as log storage if CPU or memory is not a bottleneck.
+;; And for object storage services like S3, which is billed for requests, it would cause extra 2 times of get requests for each log view.
+;; But it will save storage space and network bandwidth, so it's still recommended to use compression.
+;LOG_COMPRESSION = zstd
 ;; Default artifact retention time in days. Artifacts could have their own retention periods by setting the `retention-days` option in `actions/upload-artifact` step.
 ;ARTIFACT_RETENTION_DAYS = 90
 ;; Timeout to stop the task which have running status, but haven't been updated for a long time
go.mod (2 lines changed)

@@ -19,6 +19,7 @@ require (
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
 	github.com/ProtonMail/go-crypto v1.0.0
 	github.com/PuerkitoBio/goquery v1.9.2
+	github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2
 	github.com/alecthomas/chroma/v2 v2.14.0
 	github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb
 	github.com/blevesearch/bleve/v2 v2.4.2
@@ -200,6 +201,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
+	github.com/google/btree v1.1.2 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/go-tpm v0.9.0 // indirect
go.sum (4 lines changed)

@@ -65,6 +65,8 @@ github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4
 github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
 github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
 github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
+github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2 h1:cSXom2MoKJ9KPPw29RoZtHvUETY4F4n/kXl8m9btnQ0=
+github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2/go.mod h1:JitQWJ8JuV4Y87l8VsHiiwhb3cgdyn68mX40s7NT6PA=
 github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
 github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
 github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
@@ -350,6 +352,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -502,7 +502,13 @@ func convertTimestamp(timestamp *timestamppb.Timestamp) timeutil.TimeStamp {
 }
 
 func logFileName(repoFullName string, taskID int64) string {
-	return fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
+	ret := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
+
+	if setting.Actions.LogCompression.IsZstd() {
+		ret += ".zst"
+	}
+
+	return ret
 }
 
 func getTaskIDFromCache(token string) int64 {
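For illustration, a minimal standalone sketch of the sharding scheme this helper implements (the repository name and task ID below are made up): the task ID modulo 256, rendered as two hex digits, spreads log files across 256 subdirectories, and the ".zst" suffix is appended only when zstd compression is enabled.

package main

import "fmt"

func main() {
	repoFullName, taskID := "owner/repo", int64(4924)
	// 4924 % 256 == 60 == 0x3c, so the file lands in the "3c" bucket.
	name := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
	name += ".zst" // assuming LOG_COMPRESSION is zstd (the default)
	fmt.Println(name) // owner/repo/3c/4924.log.zst
}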
@@ -163,6 +163,7 @@ type PullRequest struct {
 	Issue *Issue `xorm:"-"`
 	Index int64
 	RequestedReviewers []*user_model.User `xorm:"-"`
+	RequestedReviewersTeams []*org_model.Team `xorm:"-"`
 	isRequestedReviewersLoaded bool `xorm:"-"`
 
 	HeadRepoID int64 `xorm:"INDEX"`
@@ -303,7 +304,28 @@ func (pr *PullRequest) LoadRequestedReviewers(ctx context.Context) error {
 	}
 	pr.isRequestedReviewersLoaded = true
 	for _, review := range reviews {
-		pr.RequestedReviewers = append(pr.RequestedReviewers, review.Reviewer)
+		if review.ReviewerID != 0 {
+			pr.RequestedReviewers = append(pr.RequestedReviewers, review.Reviewer)
+		}
+	}
+
+	return nil
+}
+
+// LoadRequestedReviewersTeams loads the requested reviewers teams.
+func (pr *PullRequest) LoadRequestedReviewersTeams(ctx context.Context) error {
+	reviews, err := GetReviewsByIssueID(ctx, pr.Issue.ID)
+	if err != nil {
+		return err
+	}
+	if err = reviews.LoadReviewersTeams(ctx); err != nil {
+		return err
+	}
+
+	for _, review := range reviews {
+		if review.ReviewerTeamID != 0 {
+			pr.RequestedReviewersTeams = append(pr.RequestedReviewersTeams, review.ReviewerTeam)
+		}
 	}
 
 	return nil
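A minimal usage sketch (hypothetical caller code, not part of the diff; assumes the issues model import and a loaded pr.Issue): after LoadRequestedReviewersTeams succeeds, the requested teams are available on the model.

// listReviewerTeamNames returns the names of the teams whose review
// was requested on the pull request.
func listReviewerTeamNames(ctx context.Context, pr *issues_model.PullRequest) ([]string, error) {
	if err := pr.LoadRequestedReviewersTeams(ctx); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(pr.RequestedReviewersTeams))
	for _, team := range pr.RequestedReviewersTeams {
		names = append(names, team.Name)
	}
	return names, nil
}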
@@ -7,6 +7,7 @@ import (
 	"context"
 
 	"code.gitea.io/gitea/models/db"
+	organization_model "code.gitea.io/gitea/models/organization"
 	user_model "code.gitea.io/gitea/models/user"
 	"code.gitea.io/gitea/modules/container"
 	"code.gitea.io/gitea/modules/optional"
@@ -37,6 +38,34 @@ func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
 	return nil
 }
 
+// LoadReviewersTeams loads reviewers teams
+func (reviews ReviewList) LoadReviewersTeams(ctx context.Context) error {
+	reviewersTeamsIDs := make([]int64, 0)
+	for _, review := range reviews {
+		if review.ReviewerTeamID != 0 {
+			reviewersTeamsIDs = append(reviewersTeamsIDs, review.ReviewerTeamID)
+		}
+	}
+
+	teamsMap := make(map[int64]*organization_model.Team, 0)
+	for _, teamID := range reviewersTeamsIDs {
+		team, err := organization_model.GetTeamByID(ctx, teamID)
+		if err != nil {
+			return err
+		}
+
+		teamsMap[teamID] = team
+	}
+
+	for _, review := range reviews {
+		if review.ReviewerTeamID != 0 {
+			review.ReviewerTeam = teamsMap[review.ReviewerTeamID]
+		}
+	}
+
+	return nil
+}
+
 func (reviews ReviewList) LoadIssues(ctx context.Context) error {
 	issueIDs := container.FilterSlice(reviews, func(review *Review) (int64, bool) {
 		return review.IssueID, true
@@ -15,6 +15,7 @@ import (
 	"code.gitea.io/gitea/models/dbfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/storage"
+	"code.gitea.io/gitea/modules/zstd"
 
 	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -28,6 +29,9 @@ const (
 	defaultBufSize = MaxLineSize
 )
 
+// WriteLogs appends logs to DBFS file for temporary storage.
+// It doesn't respect the file format in the filename like ".zst", since it's difficult to reopen a closed compressed file and append new content.
+// Why doesn't it store logs in object storage directly? Because it's not efficient to append content to object storage.
 func WriteLogs(ctx context.Context, filename string, offset int64, rows []*runnerv1.LogRow) ([]int, error) {
 	flag := os.O_WRONLY
 	if offset == 0 {
@@ -106,6 +110,17 @@ func ReadLogs(ctx context.Context, inStorage bool, filename string, offset, limi
 	return rows, nil
 }
 
+const (
+	// logZstdBlockSize is the block size for zstd compression.
+	// 128KB leads the compression ratio to be close to the regular zstd compression.
+	// And it means each read from the underlying object storage will be at least 128KB*(compression ratio).
+	// The compression ratio is about 30% for text files, so the actual read size is about 38KB, which should be acceptable.
+	logZstdBlockSize = 128 * 1024 // 128KB
+)
+
+// TransferLogs transfers logs from DBFS to object storage.
+// It happens when the file is complete and no more logs will be appended.
+// It respects the file format in the filename like ".zst", and compresses the content if needed.
 func TransferLogs(ctx context.Context, filename string) (func(), error) {
 	name := DBFSPrefix + filename
 	remove := func() {
@@ -119,7 +134,26 @@ func TransferLogs(ctx context.Context, filename string) (func(), error) {
 	}
 	defer f.Close()
 
-	if _, err := storage.Actions.Save(filename, f, -1); err != nil {
+	var reader io.Reader = f
+	if strings.HasSuffix(filename, ".zst") {
+		r, w := io.Pipe()
+		reader = r
+		zstdWriter, err := zstd.NewSeekableWriter(w, logZstdBlockSize)
+		if err != nil {
+			return nil, fmt.Errorf("zstd NewSeekableWriter: %w", err)
+		}
+		go func() {
+			defer func() {
+				_ = w.CloseWithError(zstdWriter.Close())
+			}()
+			if _, err := io.Copy(zstdWriter, f); err != nil {
+				_ = w.CloseWithError(err)
+				return
+			}
+		}()
+	}
+
+	if _, err := storage.Actions.Save(filename, reader, -1); err != nil {
 		return nil, fmt.Errorf("storage save %q: %w", filename, err)
 	}
 	return remove, nil
@@ -150,11 +184,22 @@ func OpenLogs(ctx context.Context, inStorage bool, filename string) (io.ReadSeek
 		}
 		return f, nil
 	}
 
 	f, err := storage.Actions.Open(filename)
 	if err != nil {
 		return nil, fmt.Errorf("storage open %q: %w", filename, err)
 	}
-	return f, nil
+
+	var reader io.ReadSeekCloser = f
+	if strings.HasSuffix(filename, ".zst") {
+		r, err := zstd.NewSeekableReader(f)
+		if err != nil {
+			return nil, fmt.Errorf("zstd NewSeekableReader: %w", err)
+		}
+		reader = r
+	}
+
+	return reader, nil
 }
 
 func FormatLog(timestamp time.Time, content string) string {
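The TransferLogs change streams compression through io.Pipe, so the whole log never has to be buffered in memory: a goroutine copies the plain file into the compressor, whose output feeds the pipe the storage layer reads from. A self-contained sketch of that pattern, using the standard library's gzip writer and a bytes.Buffer as stand-ins for the seekable zstd writer and the storage backend:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader(strings.Repeat("log line\n", 1000))

	r, w := io.Pipe()
	gz := gzip.NewWriter(w) // stand-in for zstd.NewSeekableWriter
	go func() {
		// Close the pipe with the compressor's error (if any) so the
		// reading side sees either a clean EOF or the failure.
		defer func() { _ = w.CloseWithError(gz.Close()) }()
		if _, err := io.Copy(gz, src); err != nil {
			_ = w.CloseWithError(err)
		}
	}()

	var sink bytes.Buffer // stand-in for storage.Actions.Save
	if _, err := io.Copy(&sink, r); err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes\n", sink.Len())
}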
@@ -95,3 +95,103 @@ func BenchmarkGetRefsBySha(b *testing.B) {
 	_, _ = bareRepo5.GetRefsBySha("c83380d7056593c51a699d12b9c00627bd5743e9", "")
 	_, _ = bareRepo5.GetRefsBySha("58a4bcc53ac13e7ff76127e0fb518b5262bf09af", "")
 }
+
+func TestRepository_IsObjectExist(t *testing.T) {
+	repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
+	require.NoError(t, err)
+	defer repo.Close()
+
+	supportShortHash := true
+
+	tests := []struct {
+		name string
+		arg  string
+		want bool
+	}{
+		{
+			name: "empty",
+			arg:  "",
+			want: false,
+		},
+		{
+			name: "branch",
+			arg:  "master",
+			want: false,
+		},
+		{
+			name: "commit hash",
+			arg:  "ce064814f4a0d337b333e646ece456cd39fab612",
+			want: true,
+		},
+		{
+			name: "short commit hash",
+			arg:  "ce06481",
+			want: supportShortHash,
+		},
+		{
+			name: "blob hash",
+			arg:  "153f451b9ee7fa1da317ab17a127e9fd9d384310",
+			want: true,
+		},
+		{
+			name: "short blob hash",
+			arg:  "153f451",
+			want: supportShortHash,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.want, repo.IsObjectExist(tt.arg))
+		})
+	}
+}
+
+func TestRepository_IsReferenceExist(t *testing.T) {
+	repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
+	require.NoError(t, err)
+	defer repo.Close()
+
+	supportBlobHash := true
+
+	tests := []struct {
+		name string
+		arg  string
+		want bool
+	}{
+		{
+			name: "empty",
+			arg:  "",
+			want: false,
+		},
+		{
+			name: "branch",
+			arg:  "master",
+			want: true,
+		},
+		{
+			name: "commit hash",
+			arg:  "ce064814f4a0d337b333e646ece456cd39fab612",
+			want: true,
+		},
+		{
+			name: "short commit hash",
+			arg:  "ce06481",
+			want: true,
+		},
+		{
+			name: "blob hash",
+			arg:  "153f451b9ee7fa1da317ab17a127e9fd9d384310",
+			want: supportBlobHash,
+		},
+		{
+			name: "short blob hash",
+			arg:  "153f451",
+			want: supportBlobHash,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.want, repo.IsReferenceExist(tt.arg))
+		})
+	}
+}
@@ -1197,7 +1197,7 @@ func hashCurrentPatternProcessor(ctx *RenderContext, node *html.Node) {
 		})
 	}
 
-	exist = ctx.GitRepo.IsObjectExist(hash)
+	exist = ctx.GitRepo.IsReferenceExist(hash)
 	ctx.ShaExistCache[hash] = exist
 }
@@ -13,8 +13,7 @@ import (
 	"code.gitea.io/gitea/modules/json"
 	"code.gitea.io/gitea/modules/util"
 	"code.gitea.io/gitea/modules/validation"
-
-	"github.com/klauspost/compress/zstd"
+	"code.gitea.io/gitea/modules/zstd"
 )
 
 var (
@@ -10,8 +10,9 @@ import (
 	"io"
 	"testing"
 
+	"code.gitea.io/gitea/modules/zstd"
+
 	"github.com/dsnet/compress/bzip2"
-	"github.com/klauspost/compress/zstd"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -14,9 +14,9 @@ import (
 
 	"code.gitea.io/gitea/modules/util"
 	"code.gitea.io/gitea/modules/validation"
+	"code.gitea.io/gitea/modules/zstd"
 
 	"github.com/blakesmith/ar"
-	"github.com/klauspost/compress/zstd"
 	"github.com/ulikunitz/xz"
 )
@@ -10,8 +10,9 @@ import (
 	"io"
 	"testing"
 
+	"code.gitea.io/gitea/modules/zstd"
+
 	"github.com/blakesmith/ar"
-	"github.com/klauspost/compress/zstd"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/ulikunitz/xz"
@@ -15,6 +15,7 @@ var (
 	Enabled bool
 	LogStorage *Storage // how the created logs should be stored
 	LogRetentionDays int64 `ini:"LOG_RETENTION_DAYS"`
+	LogCompression logCompression `ini:"LOG_COMPRESSION"`
 	ArtifactStorage *Storage // how the created artifacts should be stored
 	ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
 	DefaultActionsURL defaultActionsURL `ini:"DEFAULT_ACTIONS_URL"`
@@ -50,6 +51,20 @@ const (
 	defaultActionsURLSelf = "self" // the root URL of the self-hosted instance
 )
 
+type logCompression string
+
+func (c logCompression) IsValid() bool {
+	return c.IsNone() || c.IsZstd()
+}
+
+func (c logCompression) IsNone() bool {
+	return strings.ToLower(string(c)) == "none"
+}
+
+func (c logCompression) IsZstd() bool {
+	return c == "" || strings.ToLower(string(c)) == "zstd"
+}
+
 func loadActionsFrom(rootCfg ConfigProvider) error {
 	sec := rootCfg.Section("actions")
 	err := sec.MapTo(&Actions)
@@ -83,5 +98,9 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
 	Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour)
 	Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour)
 
+	if !Actions.LogCompression.IsValid() {
+		return fmt.Errorf("invalid [actions] LOG_COMPRESSION: %q", Actions.LogCompression)
+	}
+
 	return nil
 }
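Note the defaulting behavior encoded in IsZstd: an unset LOG_COMPRESSION maps to zstd, so compression is on by default. A minimal standalone sketch of the same three-state check (the sample values are illustrative):

package main

import (
	"fmt"
	"strings"
)

type logCompression string

func (c logCompression) IsNone() bool  { return strings.ToLower(string(c)) == "none" }
func (c logCompression) IsZstd() bool  { return c == "" || strings.ToLower(string(c)) == "zstd" }
func (c logCompression) IsValid() bool { return c.IsNone() || c.IsZstd() }

func main() {
	for _, v := range []logCompression{"", "zstd", "none", "gzip"} {
		fmt.Printf("%-6q valid=%-5v zstd=%v\n", v, v.IsValid(), v.IsZstd())
	}
	// "" and "zstd" enable zstd; "none" disables; "gzip" fails validation at startup.
}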
@@ -9,21 +9,22 @@ import (
 
 // PullRequest represents a pull request
 type PullRequest struct {
 	ID        int64      `json:"id"`
 	URL       string     `json:"url"`
 	Index     int64      `json:"number"`
 	Poster    *User      `json:"user"`
 	Title     string     `json:"title"`
 	Body      string     `json:"body"`
 	Labels    []*Label   `json:"labels"`
 	Milestone *Milestone `json:"milestone"`
 	Assignee  *User      `json:"assignee"`
 	Assignees []*User    `json:"assignees"`
 	RequestedReviewers []*User `json:"requested_reviewers"`
+	RequestedReviewersTeams []*Team `json:"requested_reviewers_teams"`
 	State     StateType  `json:"state"`
 	Draft     bool       `json:"draft"`
 	IsLocked  bool       `json:"is_locked"`
 	Comments  int        `json:"comments"`
 	// number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)
 	ReviewComments int `json:"review_comments"`
 	Additions      int `json:"additions"`
modules/zstd/option.go (new file, 46 lines)

@@ -0,0 +1,46 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package zstd
+
+import "github.com/klauspost/compress/zstd"
+
+type WriterOption = zstd.EOption
+
+var (
+	WithEncoderCRC               = zstd.WithEncoderCRC
+	WithEncoderConcurrency       = zstd.WithEncoderConcurrency
+	WithWindowSize               = zstd.WithWindowSize
+	WithEncoderPadding           = zstd.WithEncoderPadding
+	WithEncoderLevel             = zstd.WithEncoderLevel
+	WithZeroFrames               = zstd.WithZeroFrames
+	WithAllLitEntropyCompression = zstd.WithAllLitEntropyCompression
+	WithNoEntropyCompression     = zstd.WithNoEntropyCompression
+	WithSingleSegment            = zstd.WithSingleSegment
+	WithLowerEncoderMem          = zstd.WithLowerEncoderMem
+	WithEncoderDict              = zstd.WithEncoderDict
+	WithEncoderDictRaw           = zstd.WithEncoderDictRaw
+)
+
+type EncoderLevel = zstd.EncoderLevel
+
+const (
+	SpeedFastest           EncoderLevel = zstd.SpeedFastest
+	SpeedDefault           EncoderLevel = zstd.SpeedDefault
+	SpeedBetterCompression EncoderLevel = zstd.SpeedBetterCompression
+	SpeedBestCompression   EncoderLevel = zstd.SpeedBestCompression
+)
+
+type ReaderOption = zstd.DOption
+
+var (
+	WithDecoderLowmem      = zstd.WithDecoderLowmem
+	WithDecoderConcurrency = zstd.WithDecoderConcurrency
+	WithDecoderMaxMemory   = zstd.WithDecoderMaxMemory
+	WithDecoderDicts      = zstd.WithDecoderDicts
+	WithDecoderDictRaw    = zstd.WithDecoderDictRaw
+	WithDecoderMaxWindow  = zstd.WithDecoderMaxWindow
+	WithDecodeAllCapLimit = zstd.WithDecodeAllCapLimit
+	WithDecodeBuffersBelow = zstd.WithDecodeBuffersBelow
+	IgnoreChecksum        = zstd.IgnoreChecksum
+)
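These are Go type aliases rather than new types, so option values stay interchangeable with the underlying library's, and callers configure the wrapper without importing klauspost/compress directly. A small sketch of a caller (mirroring the new tests further down; only runnable inside the Forgejo module):

package main

import (
	"bytes"
	"fmt"

	"code.gitea.io/gitea/modules/zstd"
)

func main() {
	var out bytes.Buffer
	// The re-exported option constructors are the klauspost ones under the hood.
	w, err := zstd.NewWriter(&out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		panic(err)
	}
	_, _ = w.Write([]byte("hello"))
	_ = w.Close()
	fmt.Println(out.Len(), "compressed bytes")
}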
modules/zstd/zstd.go (new file, 163 lines)

@@ -0,0 +1,163 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+// Package zstd provides a high-level API for reading and writing zstd-compressed data.
+// It supports both regular and seekable zstd streams.
+// It's not a new wheel, but a wrapper around the zstd and zstd-seekable-format-go packages.
+package zstd
+
+import (
+	"errors"
+	"io"
+
+	seekable "github.com/SaveTheRbtz/zstd-seekable-format-go/pkg"
+	"github.com/klauspost/compress/zstd"
+)
+
+type Writer zstd.Encoder
+
+var _ io.WriteCloser = (*Writer)(nil)
+
+// NewWriter returns a new zstd writer.
+func NewWriter(w io.Writer, opts ...WriterOption) (*Writer, error) {
+	zstdW, err := zstd.NewWriter(w, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return (*Writer)(zstdW), nil
+}
+
+func (w *Writer) Write(p []byte) (int, error) {
+	return (*zstd.Encoder)(w).Write(p)
+}
+
+func (w *Writer) Close() error {
+	return (*zstd.Encoder)(w).Close()
+}
+
+type Reader zstd.Decoder
+
+var _ io.ReadCloser = (*Reader)(nil)
+
+// NewReader returns a new zstd reader.
+func NewReader(r io.Reader, opts ...ReaderOption) (*Reader, error) {
+	zstdR, err := zstd.NewReader(r, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return (*Reader)(zstdR), nil
+}
+
+func (r *Reader) Read(p []byte) (int, error) {
+	return (*zstd.Decoder)(r).Read(p)
+}
+
+func (r *Reader) Close() error {
+	(*zstd.Decoder)(r).Close() // no error returned
+	return nil
+}
+
+type SeekableWriter struct {
+	buf []byte
+	n   int
+	w   seekable.Writer
+}
+
+var _ io.WriteCloser = (*SeekableWriter)(nil)
+
+// NewSeekableWriter returns a zstd writer to compress data to seekable format.
+// blockSize is an important parameter, it should be decided according to the actual business requirements.
+// If it's too small, the compression ratio could be very bad, even no compression at all.
+// If it's too large, it could cost more traffic when reading the data partially from underlying storage.
+func NewSeekableWriter(w io.Writer, blockSize int, opts ...WriterOption) (*SeekableWriter, error) {
+	zstdW, err := zstd.NewWriter(nil, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	seekableW, err := seekable.NewWriter(w, zstdW)
+	if err != nil {
+		return nil, err
+	}
+
+	return &SeekableWriter{
+		buf: make([]byte, blockSize),
+		w:   seekableW,
+	}, nil
+}
+
+func (w *SeekableWriter) Write(p []byte) (int, error) {
+	written := 0
+	for len(p) > 0 {
+		n := copy(w.buf[w.n:], p)
+		w.n += n
+		written += n
+		p = p[n:]
+
+		if w.n == len(w.buf) {
+			if _, err := w.w.Write(w.buf); err != nil {
+				return written, err
+			}
+			w.n = 0
+		}
+	}
+	return written, nil
+}
+
+func (w *SeekableWriter) Close() error {
+	if w.n > 0 {
+		if _, err := w.w.Write(w.buf[:w.n]); err != nil {
+			return err
+		}
+	}
+	return w.w.Close()
+}
+
+type SeekableReader struct {
+	r seekable.Reader
+	c func() error
+}
+
+var _ io.ReadSeekCloser = (*SeekableReader)(nil)
+
+// NewSeekableReader returns a zstd reader to decompress data from seekable format.
+func NewSeekableReader(r io.ReadSeeker, opts ...ReaderOption) (*SeekableReader, error) {
+	zstdR, err := zstd.NewReader(nil, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	seekableR, err := seekable.NewReader(r, zstdR)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := &SeekableReader{
+		r: seekableR,
+	}
+	if closer, ok := r.(io.Closer); ok {
+		ret.c = closer.Close
+	}
+
+	return ret, nil
+}
+
+func (r *SeekableReader) Read(p []byte) (int, error) {
+	return r.r.Read(p)
+}
+
+func (r *SeekableReader) Seek(offset int64, whence int) (int64, error) {
+	return r.r.Seek(offset, whence)
+}
+
+func (r *SeekableReader) Close() error {
+	return errors.Join(
+		func() error {
+			if r.c != nil {
+				return r.c()
+			}
+			return nil
+		}(),
+		r.r.Close(),
+	)
+}
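A minimal end-to-end sketch of the seekable API (assuming the module path above; the block size and offset are arbitrary): compress with NewSeekableWriter, then reopen with NewSeekableReader and Seek before reading, which is what makes partial log views cheap.

package main

import (
	"bytes"
	"fmt"
	"io"

	"code.gitea.io/gitea/modules/zstd"
)

func main() {
	var buf bytes.Buffer
	w, err := zstd.NewSeekableWriter(&buf, 128*1024) // 128KB blocks, as in the Actions log code
	if err != nil {
		panic(err)
	}
	if _, err := w.Write(bytes.Repeat([]byte("log line\n"), 10000)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r, err := zstd.NewSeekableReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	defer r.Close()
	if _, err := r.Seek(9*5000, io.SeekStart); err != nil { // jump into the middle
		panic(err)
	}
	line := make([]byte, 9)
	if _, err := io.ReadFull(r, line); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", line) // "log line\n"
}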
modules/zstd/zstd_test.go (new file, 304 lines)

@@ -0,0 +1,304 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package zstd
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestWriterReader(t *testing.T) {
+	testData := prepareTestData(t, 20_000_000)
+
+	result := bytes.NewBuffer(nil)
+
+	t.Run("regular", func(t *testing.T) {
+		result.Reset()
+		writer, err := NewWriter(result)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		reader, err := NewReader(result)
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+
+	t.Run("with options", func(t *testing.T) {
+		result.Reset()
+		writer, err := NewWriter(result, WithEncoderLevel(SpeedBestCompression))
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		reader, err := NewReader(result, WithDecoderLowmem(true))
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+}
+
+func TestSeekableWriterReader(t *testing.T) {
+	testData := prepareTestData(t, 20_000_000)
+
+	result := bytes.NewBuffer(nil)
+
+	t.Run("regular", func(t *testing.T) {
+		result.Reset()
+		blockSize := 100_000
+
+		writer, err := NewSeekableWriter(result, blockSize)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+
+	t.Run("seek read", func(t *testing.T) {
+		result.Reset()
+		blockSize := 100_000
+
+		writer, err := NewSeekableWriter(result, blockSize)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		assertReader := &assertReadSeeker{r: bytes.NewReader(result.Bytes())}
+
+		reader, err := NewSeekableReader(assertReader)
+		require.NoError(t, err)
+
+		_, err = reader.Seek(10_000_000, io.SeekStart)
+		require.NoError(t, err)
+
+		data := make([]byte, 1000)
+		_, err = io.ReadFull(reader, data)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData[10_000_000:10_000_000+1000], data)
+
+		// Should seek 3 times,
+		// the first two times are for getting the index,
+		// and the third time is for reading the data.
+		assert.Equal(t, 3, assertReader.SeekTimes)
+		// Should read less than 2 blocks,
+		// even if the compression ratio is not good and the data is not in the same block.
+		assert.Less(t, assertReader.ReadBytes, blockSize*2)
+		// Should close the underlying reader if it is Closer.
+		assert.True(t, assertReader.Closed)
+	})
+
+	t.Run("tidy data", func(t *testing.T) {
+		testData := prepareTestData(t, 1000) // data size is less than a block
+
+		result.Reset()
+		blockSize := 100_000
+
+		writer, err := NewSeekableWriter(result, blockSize)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+
+	t.Run("tidy block", func(t *testing.T) {
+		result.Reset()
+		blockSize := 100
+
+		writer, err := NewSeekableWriter(result, blockSize)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+		// A too small block size will cause a bad compression rate,
+		// even the compressed data is larger than the original data.
+		assert.Greater(t, result.Len(), len(testData))
+
+		reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+
+	t.Run("compatible reader", func(t *testing.T) {
+		result.Reset()
+		blockSize := 100_000
+
+		writer, err := NewSeekableWriter(result, blockSize)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		// It should be able to read the data with a regular reader.
+		reader, err := NewReader(bytes.NewReader(result.Bytes()))
+		require.NoError(t, err)
+
+		data, err := io.ReadAll(reader)
+		require.NoError(t, err)
+		require.NoError(t, reader.Close())
+
+		assert.Equal(t, testData, data)
+	})
+
+	t.Run("wrong reader", func(t *testing.T) {
+		result.Reset()
+
+		// Use a regular writer to compress the data.
+		writer, err := NewWriter(result)
+		require.NoError(t, err)
+
+		_, err = io.Copy(writer, bytes.NewReader(testData))
+		require.NoError(t, err)
+		require.NoError(t, writer.Close())
+
+		t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
+
+		// But use a seekable reader to read the data, it should fail.
+		_, err = NewSeekableReader(bytes.NewReader(result.Bytes()))
+		require.Error(t, err)
+	})
+}
+
+// prepareTestData prepares test data to test compression.
+// Random data is not suitable for testing compression,
+// so it collects code files from the project to get enough data.
+func prepareTestData(t *testing.T, size int) []byte {
+	// .../gitea/modules/zstd
+	dir, err := os.Getwd()
+	require.NoError(t, err)
+	// .../gitea/
+	dir = filepath.Join(dir, "../../")
+
+	textExt := []string{".go", ".tmpl", ".ts", ".yml", ".css"} // add more if not enough data collected
+	isText := func(info os.FileInfo) bool {
+		if info.Size() == 0 {
+			return false
+		}
+		for _, ext := range textExt {
+			if strings.HasSuffix(info.Name(), ext) {
+				return true
+			}
+		}
+		return false
+	}
+
+	ret := make([]byte, size)
+	n := 0
+	count := 0
+
+	queue := []string{dir}
+	for len(queue) > 0 && n < size {
+		file := queue[0]
+		queue = queue[1:]
+		info, err := os.Stat(file)
+		require.NoError(t, err)
+		if info.IsDir() {
+			entries, err := os.ReadDir(file)
+			require.NoError(t, err)
+			for _, entry := range entries {
+				queue = append(queue, filepath.Join(file, entry.Name()))
+			}
+			continue
+		}
+		if !isText(info) { // text file only
+			continue
+		}
+		data, err := os.ReadFile(file)
+		require.NoError(t, err)
+		n += copy(ret[n:], data)
+		count++
+	}
+
+	if n < size {
+		require.Failf(t, "Not enough data", "Only %d bytes collected from %d files", n, count)
+	}
+	return ret
+}
+
+type assertReadSeeker struct {
+	r         io.ReadSeeker
+	SeekTimes int
+	ReadBytes int
+	Closed    bool
+}
+
+func (a *assertReadSeeker) Read(p []byte) (int, error) {
+	n, err := a.r.Read(p)
+	a.ReadBytes += n
+	return n, err
+}
+
+func (a *assertReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	a.SeekTimes++
+	return a.r.Seek(offset, whence)
+}
+
+func (a *assertReadSeeker) Close() error {
+	a.Closed = true
+	return nil
+}
release-notes/4924.md (new file, 2 lines)

@@ -0,0 +1,2 @@
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/9812b7af91b69386c5d4c08982aece7bd8f9a174) /repos/{owner}/{repo}/pulls/{index} [requested_reviewers contains null for teams](https://codeberg.org/forgejo/forgejo/issues/4108).
+feat: [commit](https://codeberg.org/forgejo/forgejo/commit/bf7373a2520ae56a1dc00416efa02de9749b63d3) Forgejo Actions logs are compressed by default. It can be disabled by setting `[actions].LOG_COMPRESSION=none`.
@@ -106,10 +106,25 @@ func ToAPIPullRequest(ctx context.Context, pr *issues_model.PullRequest, doer *u
 		log.Error("LoadRequestedReviewers[%d]: %v", pr.ID, err)
 		return nil
 	}
+	if err = pr.LoadRequestedReviewersTeams(ctx); err != nil {
+		log.Error("LoadRequestedReviewersTeams[%d]: %v", pr.ID, err)
+		return nil
+	}
+
 	for _, reviewer := range pr.RequestedReviewers {
 		apiPullRequest.RequestedReviewers = append(apiPullRequest.RequestedReviewers, ToUser(ctx, reviewer, nil))
 	}
+
+	for _, reviewerTeam := range pr.RequestedReviewersTeams {
+		convertedTeam, err := ToTeam(ctx, reviewerTeam, true)
+		if err != nil {
+			log.Error("LoadRequestedReviewersTeams[%d]: %v", pr.ID, err)
+			return nil
+		}
+
+		apiPullRequest.RequestedReviewersTeams = append(apiPullRequest.RequestedReviewersTeams, convertedTeam)
+	}
+
 	if pr.Issue.ClosedUnix != 0 {
 		apiPullRequest.Closed = pr.Issue.ClosedUnix.AsTimePtr()
 	}
templates/swagger/v1_json.tmpl
generated
7
templates/swagger/v1_json.tmpl
generated
|
@ -25088,6 +25088,13 @@
|
||||||
},
|
},
|
||||||
"x-go-name": "RequestedReviewers"
|
"x-go-name": "RequestedReviewers"
|
||||||
},
|
},
|
||||||
|
"requested_reviewers_teams": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"$ref": "#/definitions/Team"
|
||||||
|
},
|
||||||
|
"x-go-name": "RequestedReviewersTeams"
|
||||||
|
},
|
||||||
"review_comments": {
|
"review_comments": {
|
||||||
"description": "number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)",
|
"description": "number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)",
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
|
|
|
@@ -17,10 +17,10 @@ import (
 	"code.gitea.io/gitea/models/unittest"
 	user_model "code.gitea.io/gitea/models/user"
 	conda_module "code.gitea.io/gitea/modules/packages/conda"
+	"code.gitea.io/gitea/modules/zstd"
 	"code.gitea.io/gitea/tests"
 
 	"github.com/dsnet/compress/bzip2"
-	"github.com/klauspost/compress/zstd"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )