mirror of https://codeberg.org/forgejo/forgejo.git
synced 2024-11-14 05:56:14 +01:00
Merge pull request '[gitea] week 2024-45 cherry pick (gitea/main -> forgejo)' (#5789) from algernon/wcp/2024-45 into forgejo
Some checks are pending
/ release (push) Waiting to run
testing / backend-checks (push) Waiting to run
testing / frontend-checks (push) Waiting to run
testing / test-unit (push) Blocked by required conditions
testing / test-e2e (push) Blocked by required conditions
testing / test-remote-cacher (map[image:docker.io/bitnami/redis:7.2 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:docker.io/bitnami/valkey:7.2 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:ghcr.io/microsoft/garnet-alpine:1.0.14 port:6379]) (push) Blocked by required conditions
testing / test-remote-cacher (map[image:registry.redict.io/redict:7.3.0-scratch port:6379]) (push) Blocked by required conditions
testing / test-mysql (push) Blocked by required conditions
testing / test-pgsql (push) Blocked by required conditions
testing / test-sqlite (push) Blocked by required conditions
testing / security-check (push) Blocked by required conditions
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/5789
Reviewed-by: Gusted <gusted@noreply.codeberg.org>
Commit: 36b18fb6cc
52 changed files with 350 additions and 164 deletions
@@ -53,8 +53,6 @@ func (e Emoji) MarshalJSON() ([]byte, error) {
 }
 
 func main() {
-	var err error
-
 	flag.Parse()
 
 	// generate data
@@ -83,8 +83,6 @@ var replacer = strings.NewReplacer(
 var emojiRE = regexp.MustCompile(`\{Emoji:"([^"]*)"`)
 
 func generate() ([]byte, error) {
-	var err error
-
 	// load gemoji data
 	res, err := http.Get(gemojiURL)
 	if err != nil {
@@ -91,7 +91,7 @@ func TestMigrateActionsArtifacts(t *testing.T) {
 
 	srcStorage, _ := createLocalStorage(t)
 	defer test.MockVariableValue(&storage.ActionsArtifacts, srcStorage)()
-	id := int64(0)
+	id := int64(42)
 
 	addArtifact := func(storagePath string, status actions.ArtifactStatus) {
 		id++
@@ -328,6 +328,10 @@ RUN_USER = ; git
 ;; Maximum number of locks returned per page
 ;LFS_LOCKS_PAGING_NUM = 50
 ;;
+;; When clients make lfs batch requests, reject them if there are more pointers than this number
+;; zero means 'unlimited'
+;LFS_MAX_BATCH_SIZE = 0
+;;
 ;; Allow graceful restarts using SIGHUP to fork
 ;ALLOW_GRACEFUL_RESTARTS = true
 ;;
@@ -2672,6 +2676,10 @@ LEVEL = Info
 ;; override the minio base path if storage type is minio
 ;MINIO_BASE_PATH = lfs/
 
+;[lfs_client]
+;; When mirroring an upstream lfs endpoint, limit the number of pointers in each batch request to this number
+;BATCH_SIZE = 20
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; settings for packages, will override storage setting
models/fixtures/action_artifact.yml (new file, 71 lines)
@@ -0,0 +1,71 @@
+-
+  id: 1
+  run_id: 791
+  runner_id: 1
+  repo_id: 4
+  owner_id: 1
+  commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+  storage_path: "26/1/1712166500347189545.chunk"
+  file_size: 1024
+  file_compressed_size: 1024
+  content_encoding: ""
+  artifact_path: "abc.txt"
+  artifact_name: "artifact-download"
+  status: 1
+  created_unix: 1712338649
+  updated_unix: 1712338649
+  expired_unix: 1720114649
+
+-
+  id: 19
+  run_id: 791
+  runner_id: 1
+  repo_id: 4
+  owner_id: 1
+  commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+  storage_path: "26/19/1712348022422036662.chunk"
+  file_size: 1024
+  file_compressed_size: 1024
+  content_encoding: ""
+  artifact_path: "abc.txt"
+  artifact_name: "multi-file-download"
+  status: 2
+  created_unix: 1712348022
+  updated_unix: 1712348022
+  expired_unix: 1720124022
+
+-
+  id: 20
+  run_id: 791
+  runner_id: 1
+  repo_id: 4
+  owner_id: 1
+  commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+  storage_path: "26/20/1712348022423431524.chunk"
+  file_size: 1024
+  file_compressed_size: 1024
+  content_encoding: ""
+  artifact_path: "xyz/def.txt"
+  artifact_name: "multi-file-download"
+  status: 2
+  created_unix: 1712348022
+  updated_unix: 1712348022
+  expired_unix: 1720124022
+
+-
+  id: 22
+  run_id: 792
+  runner_id: 1
+  repo_id: 4
+  owner_id: 1
+  commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+  storage_path: "27/5/1730330775594233150.chunk"
+  file_size: 1024
+  file_compressed_size: 1024
+  content_encoding: "application/zip"
+  artifact_path: "artifact-v4-download.zip"
+  artifact_name: "artifact-v4-download"
+  status: 2
+  created_unix: 1730330775
+  updated_unix: 1730330775
+  expired_unix: 1738106775
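These fixtures back the Actions artifact tests further down in this diff (ids 19/20 form the multi-file artifact, id 22 the zipped v4 artifact). As a hedged sketch of how such a row is consumed — assuming Gitea's usual models/unittest fixture helpers, which this diff does not itself show:

func TestArtifactFixtureSketch(t *testing.T) {
	// Hypothetical: load the fixture row with id 22 declared above.
	artifact := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionArtifact{ID: 22})
	assert.Equal(t, "artifact-v4-download", artifact.ArtifactName)
	assert.EqualValues(t, 2, artifact.Status) // status 2 = upload confirmed in these fixtures
}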
@@ -136,8 +136,6 @@ var ErrLFSObjectNotExist = db.ErrNotExist{Resource: "LFS Meta object"}
 // NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
 // if it is not already present.
 func NewLFSMetaObject(ctx context.Context, repoID int64, p lfs.Pointer) (*LFSMetaObject, error) {
-	var err error
-
 	ctx, committer, err := db.TxContext(ctx)
 	if err != nil {
 		return nil, err
@@ -79,14 +79,20 @@ func IsRuleNameSpecial(ruleName string) bool {
 }
 
 func (protectBranch *ProtectedBranch) loadGlob() {
-	if protectBranch.globRule == nil {
-		var err error
-		protectBranch.globRule, err = glob.Compile(protectBranch.RuleName, '/')
-		if err != nil {
-			log.Warn("Invalid glob rule for ProtectedBranch[%d]: %s %v", protectBranch.ID, protectBranch.RuleName, err)
-			protectBranch.globRule = glob.MustCompile(glob.QuoteMeta(protectBranch.RuleName), '/')
-		}
-		protectBranch.isPlainName = !IsRuleNameSpecial(protectBranch.RuleName)
+	if protectBranch.isPlainName || protectBranch.globRule != nil {
+		return
+	}
+	// detect if it is not glob
+	if !IsRuleNameSpecial(protectBranch.RuleName) {
+		protectBranch.isPlainName = true
+		return
+	}
+	// now we load the glob
+	var err error
+	protectBranch.globRule, err = glob.Compile(protectBranch.RuleName, '/')
+	if err != nil {
+		log.Warn("Invalid glob rule for ProtectedBranch[%d]: %s %v", protectBranch.ID, protectBranch.RuleName, err)
+		protectBranch.globRule = glob.MustCompile(glob.QuoteMeta(protectBranch.RuleName), '/')
 	}
 }
 
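The rewritten loadGlob is a lazy, memoizing classifier: a rule with no glob metacharacters is marked isPlainName once and never pays for compilation, while a real glob is compiled at most once and cached. A minimal self-contained sketch of the same pattern, using the gobwas/glob package this code wraps (the rule type here is illustrative, not Forgejo's):

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

type rule struct {
	name    string
	isPlain bool      // set once we know the name has no glob metacharacters
	g       glob.Glob // compiled lazily, at most once
}

func isSpecial(name string) bool {
	for _, c := range name {
		switch c {
		case '*', '?', '[', ']', '{', '}': // simplified "special" detection
			return true
		}
	}
	return false
}

func (r *rule) match(branch string) bool {
	if !r.isPlain && r.g == nil {
		if !isSpecial(r.name) {
			r.isPlain = true // exact string match from now on
		} else {
			r.g = glob.MustCompile(r.name, '/')
		}
	}
	if r.isPlain {
		return r.name == branch
	}
	return r.g.Match(branch)
}

func main() {
	r := &rule{name: "release/*"}
	fmt.Println(r.match("release/v1"), r.match("main")) // true false
}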
@@ -75,3 +75,32 @@ func TestBranchRuleMatchPriority(t *testing.T) {
 		}
 	}
 }
+
+func TestBranchRuleSort(t *testing.T) {
+	in := []*ProtectedBranch{{
+		RuleName:    "b",
+		CreatedUnix: 1,
+	}, {
+		RuleName:    "b/*",
+		CreatedUnix: 3,
+	}, {
+		RuleName:    "a/*",
+		CreatedUnix: 2,
+	}, {
+		RuleName:    "c",
+		CreatedUnix: 0,
+	}, {
+		RuleName:    "a",
+		CreatedUnix: 4,
+	}}
+	expect := []string{"c", "b", "a", "a/*", "b/*"}
+
+	pbr := ProtectedBranchRules(in)
+	pbr.sort()
+
+	var got []string
+	for i := range pbr {
+		got = append(got, pbr[i].RuleName)
+	}
+	assert.Equal(t, expect, got)
+}
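The expected order ("c", "b", "a", then "a/*", "b/*") pins down the sort contract: exact-name rules rank ahead of glob rules, and ties break by creation time, oldest first. A hedged sketch of a comparator consistent with that expectation (the actual sort() implementation is not shown in this hunk):

sort.Slice(rules, func(i, j int) bool {
	iPlain := !IsRuleNameSpecial(rules[i].RuleName)
	jPlain := !IsRuleNameSpecial(rules[j].RuleName)
	if iPlain != jPlain {
		return iPlain // exact-name rules sort before glob rules
	}
	return rules[i].CreatedUnix < rules[j].CreatedUnix // then oldest first
})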
@@ -231,8 +231,7 @@ func TestGetLabelsByOrgID(t *testing.T) {
 	testSuccess(3, "reversealphabetically", []int64{4, 3})
 	testSuccess(3, "default", []int64{3, 4})
 
-	var err error
-	_, err = issues_model.GetLabelsByOrgID(db.DefaultContext, 0, "leastissues", db.ListOptions{})
+	_, err := issues_model.GetLabelsByOrgID(db.DefaultContext, 0, "leastissues", db.ListOptions{})
 	assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
 
 	_, err = issues_model.GetLabelsByOrgID(db.DefaultContext, -1, "leastissues", db.ListOptions{})
@@ -41,14 +41,12 @@ func TestMaybeRemoveBOM(t *testing.T) {
 
 func TestToUTF8(t *testing.T) {
 	resetDefaultCharsetsOrder()
-	var res string
-	var err error
 
 	// Note: golang compiler seems so behave differently depending on the current
 	// locale, so some conversions might behave differently. For that reason, we don't
 	// depend on particular conversions but in expected behaviors.
 
-	res, err = ToUTF8([]byte{0x41, 0x42, 0x43}, ConvertOpts{})
+	res, err := ToUTF8([]byte{0x41, 0x42, 0x43}, ConvertOpts{})
 	require.NoError(t, err)
 	assert.Equal(t, "ABC", res)
 
@@ -50,25 +50,35 @@ func (repo *Repository) readTreeToIndex(id ObjectID, indexFilename ...string) er
 }
 
 // ReadTreeToTemporaryIndex reads a treeish to a temporary index file
-func (repo *Repository) ReadTreeToTemporaryIndex(treeish string) (filename, tmpDir string, cancel context.CancelFunc, err error) {
-	tmpDir, err = os.MkdirTemp("", "index")
-	if err != nil {
-		return filename, tmpDir, cancel, err
+func (repo *Repository) ReadTreeToTemporaryIndex(treeish string) (tmpIndexFilename, tmpDir string, cancel context.CancelFunc, err error) {
+	defer func() {
+		// if error happens and there is a cancel function, do clean up
+		if err != nil && cancel != nil {
+			cancel()
+			cancel = nil
+		}
+	}()
+
+	removeDirFn := func(dir string) func() { // it can't use the return value "tmpDir" directly because it is empty when error occurs
+		return func() {
+			if err := util.RemoveAll(dir); err != nil {
+				log.Error("failed to remove tmp index dir: %v", err)
+			}
+		}
 	}
 
-	filename = filepath.Join(tmpDir, ".tmp-index")
-	cancel = func() {
-		err := util.RemoveAll(tmpDir)
-		if err != nil {
-			log.Error("failed to remove tmp index file: %v", err)
-		}
+	tmpDir, err = os.MkdirTemp("", "index")
+	if err != nil {
+		return "", "", nil, err
 	}
-	err = repo.ReadTreeToIndex(treeish, filename)
+
+	tmpIndexFilename = filepath.Join(tmpDir, ".tmp-index")
+	cancel = removeDirFn(tmpDir)
+	err = repo.ReadTreeToIndex(treeish, tmpIndexFilename)
 	if err != nil {
-		defer cancel()
-		return "", "", func() {}, err
+		return "", "", cancel, err
 	}
-	return filename, tmpDir, cancel, err
+	return tmpIndexFilename, tmpDir, cancel, err
 }
 
 // EmptyIndex empties the index
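The refactor hinges on a named error return plus a deferred check, so every early exit after the temp dir exists runs the cleanup exactly once. The same pattern in isolation (a generic stdlib sketch, not Forgejo API):

func makeTempPayload() (path string, cancel func(), err error) {
	// If any later step fails, the deferred block runs the cleanup and
	// clears cancel so callers never double-free.
	defer func() {
		if err != nil && cancel != nil {
			cancel()
			cancel = nil
		}
	}()

	dir, err := os.MkdirTemp("", "payload")
	if err != nil {
		return "", nil, err // nothing to clean up yet
	}
	cancel = func() { _ = os.RemoveAll(dir) }

	path = filepath.Join(dir, "data")
	if err = os.WriteFile(path, []byte("..."), 0o600); err != nil {
		return "", cancel, err // deferred block fires cancel and nils it out
	}
	return path, cancel, nil
}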
@@ -16,10 +16,9 @@ import (
 	"code.gitea.io/gitea/modules/json"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/proxy"
+	"code.gitea.io/gitea/modules/setting"
 )
 
-const httpBatchSize = 20
-
 // HTTPClient is used to communicate with the LFS server
 // https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
 type HTTPClient struct {
@@ -30,7 +29,7 @@ type HTTPClient struct {
 
 // BatchSize returns the preferred size of batchs to process
 func (c *HTTPClient) BatchSize() int {
-	return httpBatchSize
+	return setting.LFSClient.BatchSize
 }
 
 func newHTTPClient(endpoint *url.URL, httpTransport *http.Transport) *HTTPClient {
@@ -203,8 +203,7 @@ func (r *HTMLRenderer) renderIcon(w util.BufWriter, source []byte, node ast.Node
 		return ast.WalkContinue, nil
 	}
 
-	var err error
-	_, err = w.WriteString(fmt.Sprintf(`<i class="icon %s"></i>`, name))
+	_, err := w.WriteString(fmt.Sprintf(`<i class="icon %s"></i>`, name))
 	if err != nil {
 		return ast.WalkStop, err
 	}
@@ -37,8 +37,8 @@ func (s *ContentStore) ShouldServeDirect() bool {
 	return setting.Packages.Storage.MinioConfig.ServeDirect
 }
 
-func (s *ContentStore) GetServeDirectURL(key BlobHash256Key, filename string) (*url.URL, error) {
-	return s.store.URL(KeyToRelativePath(key), filename)
+func (s *ContentStore) GetServeDirectURL(key BlobHash256Key, filename string, reqParams url.Values) (*url.URL, error) {
+	return s.store.URL(KeyToRelativePath(key), filename, reqParams)
 }
 
 // FIXME: Workaround to be removed in v1.20
@@ -341,9 +341,10 @@ func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, git
 
 	for _, tag := range updates {
 		if _, err := db.GetEngine(ctx).Where("repo_id = ? AND lower_tag_name = ?", repo.ID, strings.ToLower(tag.Name)).
-			Cols("sha1").
+			Cols("sha1", "created_unix").
 			Update(&repo_model.Release{
 				Sha1: tag.Object.String(),
+				CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
 			}); err != nil {
 			return fmt.Errorf("unable to update tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
 		}
@@ -10,22 +10,31 @@ import (
 	"code.gitea.io/gitea/modules/generate"
 )
 
-// LFS represents the configuration for Git LFS
+// LFS represents the server-side configuration for Git LFS.
+// Ideally these options should be in a section like "[lfs_server]",
+// but they are in "[server]" section due to historical reasons.
+// Could be refactored in the future while keeping backwards compatibility.
 var LFS = struct {
 	StartServer bool `ini:"LFS_START_SERVER"`
 	JWTSecretBytes []byte `ini:"-"`
 	HTTPAuthExpiry time.Duration `ini:"LFS_HTTP_AUTH_EXPIRY"`
 	MaxFileSize int64 `ini:"LFS_MAX_FILE_SIZE"`
 	LocksPagingNum int `ini:"LFS_LOCKS_PAGING_NUM"`
+	MaxBatchSize int `ini:"LFS_MAX_BATCH_SIZE"`
 
 	Storage *Storage
 }{}
 
+// LFSClient represents configuration for Gitea's LFS clients, for example: mirroring upstream Git LFS
+var LFSClient = struct {
+	BatchSize int `ini:"BATCH_SIZE"`
+}{}
+
 func loadLFSFrom(rootCfg ConfigProvider) error {
+	mustMapSetting(rootCfg, "lfs_client", &LFSClient)
+
+	mustMapSetting(rootCfg, "server", &LFS)
 	sec := rootCfg.Section("server")
-	if err := sec.MapTo(&LFS); err != nil {
-		return fmt.Errorf("failed to map LFS settings: %v", err)
-	}
 
 	lfsSec, _ := rootCfg.GetSection("lfs")
 
@@ -52,6 +61,10 @@ func loadLFSFrom(rootCfg ConfigProvider) error {
 		LFS.LocksPagingNum = 50
 	}
 
+	if LFSClient.BatchSize < 1 {
+		LFSClient.BatchSize = 20
+	}
+
 	LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(24 * time.Hour)
 
 	if !LFS.StartServer || !InstallLock {
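With the default applied here, BatchSize() (see the HTTPClient hunk earlier) is always positive, so client-side consumers can chunk unconditionally. A hedged sketch of the mirroring loop this enables — the callback-style Download signature is assumed from the modules/lfs client interface, which this diff does not show:

func downloadAll(ctx context.Context, client lfs.Client, pointers []lfs.Pointer, handle lfs.DownloadCallback) error {
	batch := client.BatchSize() // setting.LFSClient.BatchSize, default 20
	for len(pointers) > 0 {
		n := min(batch, len(pointers)) // Go 1.21+ builtin min
		if err := client.Download(ctx, pointers[:n], handle); err != nil {
			return err
		}
		pointers = pointers[n:]
	}
	return nil
}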
@@ -100,3 +100,19 @@ STORAGE_TYPE = minio
 	assert.EqualValues(t, "gitea", LFS.Storage.MinioConfig.Bucket)
 	assert.EqualValues(t, "lfs/", LFS.Storage.MinioConfig.BasePath)
 }
+
+func Test_LFSClientServerConfigs(t *testing.T) {
+	iniStr := `
+[server]
+LFS_MAX_BATCH_SIZE = 100
+[lfs_client]
+# will default to 20
+BATCH_SIZE = 0
+`
+	cfg, err := NewConfigProviderFromData(iniStr)
+	assert.NoError(t, err)
+
+	assert.NoError(t, loadLFSFrom(cfg))
+	assert.EqualValues(t, 100, LFS.MaxBatchSize)
+	assert.EqualValues(t, 20, LFSClient.BatchSize)
+}
@@ -30,7 +30,7 @@ func (s DiscardStorage) Delete(_ string) error {
 	return fmt.Errorf("%s", s)
 }
 
-func (s DiscardStorage) URL(_, _ string) (*url.URL, error) {
+func (s DiscardStorage) URL(_, _ string, _ url.Values) (*url.URL, error) {
 	return nil, fmt.Errorf("%s", s)
 }
 
@@ -38,7 +38,7 @@ func Test_discardStorage(t *testing.T) {
 		require.Error(t, err, string(tt))
 	}
 	{
-		got, err := tt.URL("path", "name")
+		got, err := tt.URL("path", "name", nil)
 		assert.Nil(t, got)
 		require.Errorf(t, err, string(tt))
 	}
@@ -114,7 +114,7 @@ func (l *LocalStorage) Delete(path string) error {
 }
 
 // URL gets the redirect URL to a file
-func (l *LocalStorage) URL(path, name string) (*url.URL, error) {
+func (l *LocalStorage) URL(path, name string, reqParams url.Values) (*url.URL, error) {
 	return nil, ErrURLNotSupported
 }
 
@@ -276,8 +276,12 @@ func (m *MinioStorage) Delete(path string) error {
 }
 
 // URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
-func (m *MinioStorage) URL(path, name string) (*url.URL, error) {
-	reqParams := make(url.Values)
+func (m *MinioStorage) URL(path, name string, serveDirectReqParams url.Values) (*url.URL, error) {
+	// copy serveDirectReqParams
+	reqParams, err := url.ParseQuery(serveDirectReqParams.Encode())
+	if err != nil {
+		return nil, err
+	}
 	// TODO it may be good to embed images with 'inline' like ServeData does, but we don't want to have to read the file, do we?
 	reqParams.Set("response-content-disposition", "attachment; filename=\""+quoteEscaper.Replace(name)+"\"")
 	u, err := m.client.PresignedGetObject(m.ctx, m.bucket, m.buildMinioPath(path), 5*time.Minute, reqParams)
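Because the method now copies the caller's values before adding its own response-content-disposition, callers can safely pass a shared url.Values. A hedged caller-side sketch (the path and media type are illustrative):

params := url.Values{}
params.Set("response-content-type", "application/vnd.oci.image.layer.v1.tar+gzip")
u, err := store.URL("26/1/1712166500347189545.chunk", "layer.tar.gz", params)
if err != nil {
	log.Error("presign failed: %v", err)
	return
}
ctx.Redirect(u.String()) // S3 serves the blob with the forced Content-Type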
@@ -63,7 +63,7 @@ type ObjectStorage interface {
 	Save(path string, r io.Reader, size int64) (int64, error)
 	Stat(path string) (os.FileInfo, error)
 	Delete(path string) error
-	URL(path, name string) (*url.URL, error)
+	URL(path, name string, reqParams url.Values) (*url.URL, error)
 	IterateObjects(path string, iterator func(path string, obj Object) error) error
 }
 
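This is a breaking change for any out-of-tree ObjectStorage implementation: every backend must accept the extra parameter, even if it ignores it as LocalStorage does above. A minimal conforming stub (hedged sketch):

type nullStorage struct{}

// URL satisfies the updated interface; backends without presigned-URL
// support keep returning ErrURLNotSupported and ignore reqParams.
func (nullStorage) URL(path, name string, reqParams url.Values) (*url.URL, error) {
	return nil, storage.ErrURLNotSupported
}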
@@ -131,7 +131,7 @@ var (
 	ActionsArtifacts ObjectStorage = UninitializedStorage
 )
 
-// Init init the stoarge
+// Init init the storage
 func Init() error {
 	for _, f := range []func() error{
 		initAttachments,
release-notes/5789.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/362ad0ba39bdbc87202e349678e21fc2a75ff7cb) Update force-pushed tags too when syncing mirrors
+chore: [commit](https://codeberg.org/forgejo/forgejo/commit/b308bcca7c950b7f0d127ee4282019c2a9923299) Improved diff view performance
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/4c5bdddf7751a35985c08ba6506f1f30103749d6) Fix `missing signature key` error when pulling Docker images with `SERVE_DIRECT` enabled
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/2c5fdb108ff9e23e8f907fb6afe59177c6bb202e) Fix the missing menu in organization project view page
+feat: [commit](https://codeberg.org/forgejo/forgejo/commit/1e595979625e54d375a0eaa440b84ef5e17af160) Add new [lfs_client].BATCH_SIZE and [server].LFS_MAX_BATCH_SIZE config settings.
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/2358c0d899faec8311e46dcb0550041496bcd532) Properly clean temporary index files
@@ -437,7 +437,7 @@ func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
 	for _, artifact := range artifacts {
 		var downloadURL string
 		if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
-			u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName)
+			u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName, nil)
 			if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
 				log.Error("Error getting serve direct url: %v", err)
 			}
@@ -530,7 +530,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
 	respData := GetSignedArtifactURLResponse{}
 
 	if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
-		u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath)
+		u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath, nil)
 		if u != nil && err == nil {
 			respData.SignedUrl = u.String()
 		}
@@ -689,7 +689,9 @@ func DeleteManifest(ctx *context.Context) {
 }
 
 func serveBlob(ctx *context.Context, pfd *packages_model.PackageFileDescriptor) {
-	s, u, _, err := packages_service.GetPackageBlobStream(ctx, pfd.File, pfd.Blob)
+	serveDirectReqParams := make(url.Values)
+	serveDirectReqParams.Set("response-content-type", pfd.Properties.GetByName(container_module.PropertyMediaType))
+	s, u, _, err := packages_service.GetPackageBlobStream(ctx, pfd.File, pfd.Blob, serveDirectReqParams)
 	if err != nil {
 		apiError(ctx, http.StatusInternalServerError, err)
 		return
@@ -217,7 +217,7 @@ func servePackageFile(ctx *context.Context, params parameters, serveContent bool
 		return
 	}
 
-	s, u, _, err := packages_service.GetPackageBlobStream(ctx, pf, pb)
+	s, u, _, err := packages_service.GetPackageBlobStream(ctx, pf, pb, nil)
 	if err != nil {
 		apiError(ctx, http.StatusInternalServerError, err)
 		return
@@ -214,7 +214,7 @@ func GetRawFileOrLFS(ctx *context.APIContext) {
 
 	if setting.LFS.Storage.MinioConfig.ServeDirect {
 		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name())
+		u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name(), nil)
 		if u != nil && err == nil {
 			ctx.Redirect(u.String())
 			return
@@ -341,7 +341,7 @@ func download(ctx *context.APIContext, archiveName string, archiver *repo_model.
 	rPath := archiver.RelativePath()
 	if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
 		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.RepoArchives.URL(rPath, downloadName)
+		u, err := storage.RepoArchives.URL(rPath, downloadName, nil)
 		if u != nil && err == nil {
 			ctx.Redirect(u.String())
 			return
@@ -202,7 +202,6 @@ func Search(ctx *context.APIContext) {
 		}
 	}
 
-	var err error
 	repos, count, err := repo_model.SearchRepository(ctx, opts)
 	if err != nil {
 		ctx.JSON(http.StatusInternalServerError, api.SearchError{
@@ -130,7 +130,6 @@ func ListMyRepos(ctx *context.APIContext) {
 		return
 	}
 
-	var err error
 	repos, count, err := repo_model.SearchRepository(ctx, opts)
 	if err != nil {
 		ctx.Error(http.StatusInternalServerError, "SearchRepository", err)
@@ -39,7 +39,7 @@ func storageHandler(storageSetting *setting.Storage, prefix string, objStore sto
 		rPath := strings.TrimPrefix(req.URL.Path, "/"+prefix+"/")
 		rPath = util.PathJoinRelX(rPath)
 
-		u, err := objStore.URL(rPath, path.Base(rPath))
+		u, err := objStore.URL(rPath, path.Base(rPath), nil)
 		if err != nil {
 			if os.IsNotExist(err) || errors.Is(err, os.ErrNotExist) {
 				log.Warn("Unable to find %s %s", prefix, rPath)
@@ -307,7 +307,6 @@ func ViewPost(ctx *context_module.Context) {
 		if validCursor {
 			length := step.LogLength - cursor.Cursor
 			offset := task.LogIndexes[index]
-			var err error
 			logRows, err := actions.ReadLogs(ctx, task.LogInStorage, task.LogFilename, offset, length)
 			if err != nil {
 				ctx.Error(http.StatusInternalServerError, err.Error())
@@ -689,7 +688,8 @@ func ArtifactsDownloadView(ctx *context_module.Context) {
 	if len(artifacts) == 1 && artifacts[0].ArtifactName+".zip" == artifacts[0].ArtifactPath && artifacts[0].ContentEncoding == "application/zip" {
 		art := artifacts[0]
 		if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
-			u, err := storage.ActionsArtifacts.URL(art.StoragePath, art.ArtifactPath)
+			u, err := storage.ActionsArtifacts.URL(art.StoragePath, art.ArtifactPath, nil)
+
 			if u != nil && err == nil {
 				ctx.Redirect(u.String())
 				return
@@ -94,7 +94,6 @@ func ActivityAuthors(ctx *context.Context) {
 		timeFrom = timeUntil.Add(-time.Hour * 168)
 	}
 
-	var err error
 	authors, err := activities_model.GetActivityStatsTopAuthors(ctx, ctx.Repo.Repository, timeFrom, 10)
 	if err != nil {
 		ctx.ServerError("GetActivityStatsTopAuthors", err)
@@ -134,7 +134,7 @@ func ServeAttachment(ctx *context.Context, uuid string) {
 
 	if setting.Attachment.Storage.MinioConfig.ServeDirect {
 		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.Attachments.URL(attach.RelativePath(), attach.Name)
+		u, err := storage.Attachments.URL(attach.RelativePath(), attach.Name, nil)
 
 		if u != nil && err == nil {
 			ctx.Redirect(u.String())
@@ -338,6 +338,7 @@ func Diff(ctx *context.Context) {
 		MaxLineCharacters: setting.Git.MaxGitDiffLineCharacters,
 		MaxFiles: maxFiles,
 		WhitespaceBehavior: gitdiff.GetWhitespaceFlag(ctx.Data["WhitespaceBehavior"].(string)),
+		FileOnly: fileOnly,
 	}, files...)
 	if err != nil {
 		ctx.NotFound("GetDiff", err)
@@ -611,6 +611,8 @@ func PrepareCompareDiff(
 		maxLines, maxFiles = -1, -1
 	}
 
+	fileOnly := ctx.FormBool("file-only")
+
 	diff, err := gitdiff.GetDiff(ctx, ci.HeadGitRepo,
 		&gitdiff.DiffOptions{
 			BeforeCommitID: beforeCommitID,
@@ -621,6 +623,7 @@ func PrepareCompareDiff(
 			MaxFiles: maxFiles,
 			WhitespaceBehavior: whitespaceBehavior,
 			DirectComparison: ci.DirectComparison,
+			FileOnly: fileOnly,
 		}, ctx.FormStrings("files")...)
 	if err != nil {
 		ctx.ServerError("GetDiffRangeWithWhitespaceBehavior", err)
@@ -54,8 +54,8 @@ func ServeBlobOrLFS(ctx *context.Context, blob *git.Blob, lastModified *time.Tim
 	}
 
 	if setting.LFS.Storage.MinioConfig.ServeDirect {
-		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name())
+		// If we have a signed url (S3, object storage, blob storage), redirect to this directly.
+		u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name(), nil)
 		if u != nil && err == nil {
 			ctx.Redirect(u.String())
 			return nil
@@ -614,12 +614,12 @@ func PrepareViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git.C
 	var headBranchSha string
 	// HeadRepo may be missing
 	if pull.HeadRepo != nil {
-		headGitRepo, err := gitrepo.OpenRepository(ctx, pull.HeadRepo)
+		headGitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pull.HeadRepo)
 		if err != nil {
-			ctx.ServerError("OpenRepository", err)
+			ctx.ServerError("RepositoryFromContextOrOpen", err)
 			return nil
 		}
-		defer headGitRepo.Close()
+		defer closer.Close()
 
 		if pull.Flow == issues_model.PullRequestFlowGithub {
 			headBranchExist = headGitRepo.IsBranchExist(pull.HeadBranch)
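RepositoryFromContextOrOpen returns a repository that an earlier middleware already opened for this request (plus a no-op closer) when one is cached in the context, and only really opens and closes otherwise; that is why the defer switches from headGitRepo.Close() to closer.Close(). Hedged call-site sketch:

headGitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pull.HeadRepo)
if err != nil {
	return fmt.Errorf("open head repo: %w", err)
}
defer closer.Close() // no-op closer when the repo came from the context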
@@ -966,6 +966,7 @@ func viewPullFiles(ctx *context.Context, specifiedStartCommit, specifiedEndCommi
 		MaxLineCharacters: setting.Git.MaxGitDiffLineCharacters,
 		MaxFiles: maxFiles,
 		WhitespaceBehavior: gitdiff.GetWhitespaceFlag(ctx.Data["WhitespaceBehavior"].(string)),
+		FileOnly: fileOnly,
 	}
 
 	if !willShowSpecifiedCommit {
@@ -505,7 +505,7 @@ func download(ctx *context.Context, archiveName string, archiver *repo_model.Rep
 	rPath := archiver.RelativePath()
 	if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
 		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.RepoArchives.URL(rPath, downloadName)
+		u, err := storage.RepoArchives.URL(rPath, downloadName, nil)
 		if u != nil && err == nil {
 			if archiver.ReleaseID != 0 {
 				err = repo_model.CountArchiveDownload(ctx, ctx.Repo.Repository.ID, archiver.ReleaseID, archiver.Type)
@@ -147,7 +147,6 @@ func FindReadmeFileInEntries(ctx *context.Context, entries []*git.TreeEntry, try
 			// this should be impossible; if subTreeEntry exists so should this.
 			continue
 		}
-		var err error
 		childEntries, err := subTree.ListEntries()
 		if err != nil {
 			return "", nil, err
@@ -379,18 +379,11 @@ func (diffFile *DiffFile) GetType() int {
 }
 
 // GetTailSection creates a fake DiffLineSection if the last section is not the end of the file
-func (diffFile *DiffFile) GetTailSection(gitRepo *git.Repository, leftCommitID, rightCommitID string) *DiffSection {
+func (diffFile *DiffFile) GetTailSection(gitRepo *git.Repository, leftCommit, rightCommit *git.Commit) *DiffSection {
 	if len(diffFile.Sections) == 0 || diffFile.Type != DiffFileChange || diffFile.IsBin || diffFile.IsLFSFile {
 		return nil
 	}
-	leftCommit, err := gitRepo.GetCommit(leftCommitID)
-	if err != nil {
-		return nil
-	}
-	rightCommit, err := gitRepo.GetCommit(rightCommitID)
-	if err != nil {
-		return nil
-	}
 	lastSection := diffFile.Sections[len(diffFile.Sections)-1]
 	lastLine := lastSection.Lines[len(lastSection.Lines)-1]
 	leftLineCount := getCommitFileLineCount(leftCommit, diffFile.Name)
@@ -532,11 +525,6 @@ parsingLoop:
 			lastFile := createDiffFile(diff, line)
 			diff.End = lastFile.Name
 			diff.IsIncomplete = true
-			_, err := io.Copy(io.Discard, reader)
-			if err != nil {
-				// By the definition of io.Copy this never returns io.EOF
-				return diff, fmt.Errorf("error during io.Copy: %w", err)
-			}
 			break parsingLoop
 		}
 
|
@ -1097,6 +1085,7 @@ type DiffOptions struct {
|
||||||
MaxFiles int
|
MaxFiles int
|
||||||
WhitespaceBehavior git.TrustedCmdArgs
|
WhitespaceBehavior git.TrustedCmdArgs
|
||||||
DirectComparison bool
|
DirectComparison bool
|
||||||
|
FileOnly bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDiff builds a Diff between two commits of a repository.
|
// GetDiff builds a Diff between two commits of a repository.
|
||||||
|
@@ -1105,12 +1094,16 @@ type DiffOptions struct {
 func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff, error) {
 	repoPath := gitRepo.Path
 
+	var beforeCommit *git.Commit
 	commit, err := gitRepo.GetCommit(opts.AfterCommitID)
 	if err != nil {
 		return nil, err
 	}
 
-	cmdDiff := git.NewCommand(gitRepo.Ctx)
+	cmdCtx, cmdCancel := context.WithCancel(ctx)
+	defer cmdCancel()
+
+	cmdDiff := git.NewCommand(cmdCtx)
 	objectFormat, err := gitRepo.GetObjectFormat()
 	if err != nil {
 		return nil, err
@@ -1132,6 +1125,12 @@ func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, fi
 			AddArguments(opts.WhitespaceBehavior...).
 			AddDynamicArguments(actualBeforeCommitID, opts.AfterCommitID)
 		opts.BeforeCommitID = actualBeforeCommitID
+
+		var err error
+		beforeCommit, err = gitRepo.GetCommit(opts.BeforeCommitID)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	// In git 2.31, git diff learned --skip-to which we can use to shortcut skip to file
@@ -1166,7 +1165,9 @@ func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, fi
 		_ = writer.Close()
 	}()
 
-	diff, err := ParsePatch(ctx, opts.MaxLines, opts.MaxLineCharacters, opts.MaxFiles, reader, parsePatchSkipToFile)
+	diff, err := ParsePatch(cmdCtx, opts.MaxLines, opts.MaxLineCharacters, opts.MaxFiles, reader, parsePatchSkipToFile)
+	// Ensure the git process is killed if it didn't exit already
+	cmdCancel()
 	if err != nil {
 		return nil, fmt.Errorf("unable to ParsePatch: %w", err)
 	}
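Running the diff under its own cancellable context lets the parser terminate the git process as soon as parsing stops (for example once MaxFiles is reached), which also explains the io.Copy(io.Discard, reader) drain removed in the parsingLoop hunk above. The shape of the pattern in plain os/exec terms (a sketch; parsePrefix is a hypothetical stand-in for ParsePatch):

cmdCtx, cmdCancel := context.WithCancel(ctx)
defer cmdCancel()

cmd := exec.CommandContext(cmdCtx, "git", "diff", "HEAD~1", "HEAD")
stdout, err := cmd.StdoutPipe()
if err != nil {
	return nil, err
}
if err := cmd.Start(); err != nil {
	return nil, err
}
diff, parseErr := parsePrefix(stdout) // may stop reading early
cmdCancel()                           // kill git if it is still writing
_ = cmd.Wait()                        // reap the process; its exit status no longer matters
if parseErr != nil {
	return nil, parseErr
}
return diff, nil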
@@ -1207,37 +1208,28 @@ func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, fi
 			diffFile.IsGenerated = analyze.IsGenerated(diffFile.Name)
 		}
 
-		tailSection := diffFile.GetTailSection(gitRepo, opts.BeforeCommitID, opts.AfterCommitID)
+		tailSection := diffFile.GetTailSection(gitRepo, beforeCommit, commit)
 		if tailSection != nil {
 			diffFile.Sections = append(diffFile.Sections, tailSection)
 		}
 	}
 
-	separator := "..."
-	if opts.DirectComparison {
-		separator = ".."
+	if opts.FileOnly {
+		return diff, nil
 	}
 
-	diffPaths := []string{opts.BeforeCommitID + separator + opts.AfterCommitID}
-	if len(opts.BeforeCommitID) == 0 || opts.BeforeCommitID == objectFormat.EmptyObjectID().String() {
-		diffPaths = []string{objectFormat.EmptyTree().String(), opts.AfterCommitID}
-	}
-	diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
-	if err != nil && strings.Contains(err.Error(), "no merge base") {
-		// git >= 2.28 now returns an error if base and head have become unrelated.
-		// previously it would return the results of git diff --shortstat base head so let's try that...
-		diffPaths = []string{opts.BeforeCommitID, opts.AfterCommitID}
-		diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
-	}
+	stats, err := GetPullDiffStats(gitRepo, opts)
 	if err != nil {
 		return nil, err
 	}
 
+	diff.NumFiles, diff.TotalAddition, diff.TotalDeletion = stats.NumFiles, stats.TotalAddition, stats.TotalDeletion
+
 	return diff, nil
 }
 
 type PullDiffStats struct {
-	TotalAddition, TotalDeletion int
+	NumFiles, TotalAddition, TotalDeletion int
 }
 
 // GetPullDiffStats
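Threading FileOnly through DiffOptions (set from the file-only form value in the compare and pull-view hunks above) lets single-file requests return before the repo-wide shortstat pass. Hedged usage sketch:

diff, err := gitdiff.GetDiff(ctx, gitRepo, &gitdiff.DiffOptions{
	AfterCommitID: afterSHA,
	MaxLines:      setting.Git.MaxGitDiffLines,
	MaxFiles:      1,
	FileOnly:      true, // skip NumFiles/TotalAddition/TotalDeletion stats
}, "README.md")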
@@ -1261,12 +1253,12 @@ func GetPullDiffStats(gitRepo *git.Repository, opts *DiffOptions) (*PullDiffStat
 		diffPaths = []string{objectFormat.EmptyTree().String(), opts.AfterCommitID}
 	}
 
-	_, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
+	diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
 	if err != nil && strings.Contains(err.Error(), "no merge base") {
 		// git >= 2.28 now returns an error if base and head have become unrelated.
 		// previously it would return the results of git diff --shortstat base head so let's try that...
 		diffPaths = []string{opts.BeforeCommitID, opts.AfterCommitID}
-		_, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
+		diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
 	}
 	if err != nil {
 		return nil, err
@@ -56,8 +56,6 @@ func GetTemplateConfig(gitRepo *git.Repository, path string, commit *git.Commit)
 		return GetDefaultTemplateConfig(), nil
 	}
 
-	var err error
-
 	treeEntry, err := commit.GetTreeEntryByPath(path)
 	if err != nil {
 		return GetDefaultTemplateConfig(), err
@@ -192,6 +192,11 @@ func BatchHandler(ctx *context.Context) {
 		}
 	}
 
+	if setting.LFS.MaxBatchSize != 0 && len(br.Objects) > setting.LFS.MaxBatchSize {
+		writeStatus(ctx, http.StatusRequestEntityTooLarge)
+		return
+	}
+
 	contentStore := lfs_module.NewContentStore()
 
 	var responseObjects []*lfs_module.ObjectResponse
@@ -480,7 +485,7 @@ func buildObjectResponse(rc *requestContext, pointer lfs_module.Pointer, downloa
 	var link *lfs_module.Link
 	if setting.LFS.Storage.MinioConfig.ServeDirect {
 		// If we have a signed url (S3, object storage), redirect to this directly.
-		u, err := storage.LFS.URL(pointer.RelativePath(), pointer.Oid)
+		u, err := storage.LFS.URL(pointer.RelativePath(), pointer.Oid, nil)
 		if u != nil && err == nil {
 			// Presigned url does not need the Authorization header
 			// https://github.com/go-gitea/gitea/issues/21525
@@ -602,12 +602,12 @@ func GetPackageFileStream(ctx context.Context, pf *packages_model.PackageFile) (
 		return nil, nil, nil, err
 	}
 
-	return GetPackageBlobStream(ctx, pf, pb)
+	return GetPackageBlobStream(ctx, pf, pb, nil)
 }
 
 // GetPackageBlobStream returns the content of the specific package blob
 // If the storage supports direct serving and it's enabled, only the direct serving url is returned.
-func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, pb *packages_model.PackageBlob) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, pb *packages_model.PackageBlob, serveDirectReqParams url.Values) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
 	key := packages_module.BlobHash256Key(pb.HashSHA256)
 
 	cs := packages_module.NewContentStore()
|
||||||
cs := packages_module.NewContentStore()
|
cs := packages_module.NewContentStore()
|
||||||
|
@ -617,7 +617,7 @@ func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, p
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if cs.ShouldServeDirect() {
|
if cs.ShouldServeDirect() {
|
||||||
u, err = cs.GetServeDirectURL(key, pf.Name)
|
u, err = cs.GetServeDirectURL(key, pf.Name, serveDirectReqParams)
|
||||||
if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
|
if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
|
||||||
log.Error("Error getting serve direct url: %v", err)
|
log.Error("Error getting serve direct url: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,9 +1,13 @@
 {{template "base/head" .}}
-<div role="main" aria-label="{{.Title}}" class="page-content repository projects view-project">
-	{{template "shared/user/org_profile_avatar" .}}
-	<div class="ui container tw-mb-4">
-		{{template "user/overview/header" .}}
-	</div>
+<div role="main" aria-label="{{.Title}}" class="page-content organization repository projects view-project">
+	{{if .ContextUser.IsOrganization}}
+		{{template "org/header" .}}
+	{{else}}
+		{{template "shared/user/org_profile_avatar" .}}
+		<div class="ui container tw-mb-4">
+			{{template "user/overview/header" .}}
+		</div>
+	{{end}}
 	<div class="ui container fluid padded">
 		{{template "projects/view" .}}
 	</div>
@@ -117,7 +117,7 @@
 	{{$sameBase := ne $.BaseName $.HeadUserName}}
 	{{$differentBranch := ne . $.HeadBranch}}
 	{{if or $sameBase $differentBranch}}
-		<div class="item {{if eq $.BaseBranch .}}selected{{end}}" data-branch="{{.}}">{{$.BaseName}}{{if $.HeadRepo}}/{{$.HeadRepo}}{{end}}:{{.}}</div>
+		<div class="item {{if eq $.BaseBranch .}}selected{{end}}" data-branch="{{.}}">{{$.BaseName}}:{{.}}</div>
 	{{end}}
 {{end}}
 </div>
@@ -38,21 +38,21 @@ func TestActionsArtifactUploadSingleFile(t *testing.T) {
 
 	// get upload url
 	idx := strings.Index(uploadResp.FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
-	url := uploadResp.FileContainerResourceURL[idx:] + "?itemPath=artifact/abc.txt"
+	url := uploadResp.FileContainerResourceURL[idx:] + "?itemPath=artifact/abc-2.txt"
 
 	// upload artifact chunk
-	body := strings.Repeat("A", 1024)
+	body := strings.Repeat("C", 1024)
 	req = NewRequestWithBody(t, "PUT", url, strings.NewReader(body)).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a").
 		SetHeader("Content-Range", "bytes 0-1023/1024").
 		SetHeader("x-tfs-filelength", "1024").
-		SetHeader("x-actions-results-md5", "1HsSe8LeLWh93ILaw1TEFQ==") // base64(md5(body))
+		SetHeader("x-actions-results-md5", "XVlf820rMInUi64wmMi6EA==") // base64(md5(body))
 	MakeRequest(t, req, http.StatusOK)
 
 	t.Logf("Create artifact confirm")
 
 	// confirm artifact upload
-	req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName=artifact").
+	req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName=artifact-single").
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	MakeRequest(t, req, http.StatusOK)
 }
@@ -115,29 +115,40 @@ func TestActionsArtifactDownload(t *testing.T) {
 	resp := MakeRequest(t, req, http.StatusOK)
 	var listResp listArtifactsResponse
 	DecodeJSON(t, resp, &listResp)
-	assert.Equal(t, int64(1), listResp.Count)
-	assert.Equal(t, "artifact", listResp.Value[0].Name)
-	assert.Contains(t, listResp.Value[0].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
+	assert.Equal(t, int64(2), listResp.Count)

-	idx := strings.Index(listResp.Value[0].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
-	url := listResp.Value[0].FileContainerResourceURL[idx+1:] + "?itemPath=artifact"
+	// Return list might be in any order. Get one file.
+	var artifactIdx int
+	for i, artifact := range listResp.Value {
+		if artifact.Name == "artifact-download" {
+			artifactIdx = i
+			break
+		}
+	}
+	assert.NotNil(t, artifactIdx)
+	assert.Equal(t, "artifact-download", listResp.Value[artifactIdx].Name)
+	assert.Contains(t, listResp.Value[artifactIdx].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
+
+	idx := strings.Index(listResp.Value[artifactIdx].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
+	url := listResp.Value[artifactIdx].FileContainerResourceURL[idx+1:] + "?itemPath=artifact-download"
 	req = NewRequest(t, "GET", url).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	resp = MakeRequest(t, req, http.StatusOK)
 	var downloadResp downloadArtifactResponse
 	DecodeJSON(t, resp, &downloadResp)
 	assert.Len(t, downloadResp.Value, 1)
-	assert.Equal(t, "artifact/abc.txt", downloadResp.Value[0].Path)
-	assert.Equal(t, "file", downloadResp.Value[0].ItemType)
-	assert.Contains(t, downloadResp.Value[0].ContentLocation, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
+	assert.Equal(t, "artifact-download/abc.txt", downloadResp.Value[artifactIdx].Path)
+	assert.Equal(t, "file", downloadResp.Value[artifactIdx].ItemType)
+	assert.Contains(t, downloadResp.Value[artifactIdx].ContentLocation, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")

-	idx = strings.Index(downloadResp.Value[0].ContentLocation, "/api/actions_pipeline/_apis/pipelines/")
-	url = downloadResp.Value[0].ContentLocation[idx:]
+	idx = strings.Index(downloadResp.Value[artifactIdx].ContentLocation, "/api/actions_pipeline/_apis/pipelines/")
+	url = downloadResp.Value[artifactIdx].ContentLocation[idx:]
 	req = NewRequest(t, "GET", url).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	resp = MakeRequest(t, req, http.StatusOK)

 	body := strings.Repeat("A", 1024)
-	assert.Equal(t, resp.Body.String(), body)
+	assert.Equal(t, body, resp.Body.String())
 }

 func TestActionsArtifactUploadMultipleFile(t *testing.T) {
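Since the fixtures now seed more than one artifact and the list endpoint makes no ordering guarantee, the test above scans for its artifact by name instead of assuming index 0. A sketch of the same lookup as a reusable helper (assuming the test file's listArtifactsResponseItem type); returning -1 makes the not-found case observable, which assert.NotNil on an int cannot do, since an int is never nil:

// findArtifactIndex returns the index of the artifact with the given
// name, or -1 when the list does not contain it.
func findArtifactIndex(items []listArtifactsResponseItem, name string) int {
	for i, item := range items {
		if item.Name == name {
			return i
		}
	}
	return -1
}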
@@ -163,14 +174,14 @@ func TestActionsArtifactUploadMultipleFile(t *testing.T) {

 	files := []uploadingFile{
 		{
-			Path:    "abc.txt",
-			Content: strings.Repeat("A", 1024),
-			MD5:     "1HsSe8LeLWh93ILaw1TEFQ==",
+			Path:    "abc-3.txt",
+			Content: strings.Repeat("D", 1024),
+			MD5:     "9nqj7E8HZmfQtPifCJ5Zww==",
 		},
 		{
-			Path:    "xyz/def.txt",
-			Content: strings.Repeat("B", 1024),
-			MD5:     "6fgADK/7zjadf+6cB9Q1CQ==",
+			Path:    "xyz/def-2.txt",
+			Content: strings.Repeat("E", 1024),
+			MD5:     "/s1kKvxeHlUX85vaTaVxuA==",
 		},
 	}
@@ -199,7 +210,7 @@ func TestActionsArtifactUploadMultipleFile(t *testing.T) {
 func TestActionsArtifactDownloadMultiFiles(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()

-	const testArtifactName = "multi-files"
+	const testArtifactName = "multi-file-download"

 	req := NewRequest(t, "GET", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts").
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
@@ -226,7 +237,7 @@ func TestActionsArtifactDownloadMultiFiles(t *testing.T) {
 	DecodeJSON(t, resp, &downloadResp)
 	assert.Len(t, downloadResp.Value, 2)

-	downloads := [][]string{{"multi-files/abc.txt", "A"}, {"multi-files/xyz/def.txt", "B"}}
+	downloads := [][]string{{"multi-file-download/abc.txt", "B"}, {"multi-file-download/xyz/def.txt", "C"}}
 	for _, v := range downloadResp.Value {
 		var bodyChar string
 		var path string
@@ -247,8 +258,7 @@ func TestActionsArtifactDownloadMultiFiles(t *testing.T) {
 		req = NewRequest(t, "GET", url).
 			AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 		resp = MakeRequest(t, req, http.StatusOK)
-		body := strings.Repeat(bodyChar, 1024)
-		assert.Equal(t, resp.Body.String(), body)
+		assert.Equal(t, strings.Repeat(bodyChar, 1024), resp.Body.String())
 	}
 }

@@ -300,7 +310,7 @@ func TestActionsArtifactOverwrite(t *testing.T) {
 	DecodeJSON(t, resp, &listResp)

 	idx := strings.Index(listResp.Value[0].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
-	url := listResp.Value[0].FileContainerResourceURL[idx+1:] + "?itemPath=artifact"
+	url := listResp.Value[0].FileContainerResourceURL[idx+1:] + "?itemPath=artifact-download"
 	req = NewRequest(t, "GET", url).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	resp = MakeRequest(t, req, http.StatusOK)
@@ -320,14 +330,14 @@ func TestActionsArtifactOverwrite(t *testing.T) {
 	// upload same artifact, it uses 4096 B
 	req := NewRequestWithJSON(t, "POST", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts", getUploadArtifactRequest{
 		Type: "actions_storage",
-		Name: "artifact",
+		Name: "artifact-download",
 	}).AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	resp := MakeRequest(t, req, http.StatusOK)
 	var uploadResp uploadArtifactResponse
 	DecodeJSON(t, resp, &uploadResp)

 	idx := strings.Index(uploadResp.FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
-	url := uploadResp.FileContainerResourceURL[idx:] + "?itemPath=artifact/abc.txt"
+	url := uploadResp.FileContainerResourceURL[idx:] + "?itemPath=artifact-download/abc.txt"
 	body := strings.Repeat("B", 4096)
 	req = NewRequestWithBody(t, "PUT", url, strings.NewReader(body)).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a").
@@ -337,7 +347,7 @@ func TestActionsArtifactOverwrite(t *testing.T) {
 	MakeRequest(t, req, http.StatusOK)

 	// confirm artifact upload
-	req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName=artifact").
+	req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName=artifact-download").
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	MakeRequest(t, req, http.StatusOK)
 }
@@ -352,15 +362,15 @@ func TestActionsArtifactOverwrite(t *testing.T) {

 	var uploadedItem listArtifactsResponseItem
 	for _, item := range listResp.Value {
-		if item.Name == "artifact" {
+		if item.Name == "artifact-download" {
 			uploadedItem = item
 			break
 		}
 	}
-	assert.Equal(t, "artifact", uploadedItem.Name)
+	assert.Equal(t, "artifact-download", uploadedItem.Name)

 	idx := strings.Index(uploadedItem.FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
-	url := uploadedItem.FileContainerResourceURL[idx+1:] + "?itemPath=artifact"
+	url := uploadedItem.FileContainerResourceURL[idx+1:] + "?itemPath=artifact-download"
 	req = NewRequest(t, "GET", url).
 		AddTokenAuth("8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
 	resp = MakeRequest(t, req, http.StatusOK)
@@ -313,7 +313,7 @@ func TestActionsArtifactV4DownloadSingle(t *testing.T) {

 	// acquire artifact upload url
 	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts", toProtoJSON(&actions.ListArtifactsRequest{
-		NameFilter:              wrapperspb.String("artifact"),
+		NameFilter:              wrapperspb.String("artifact-v4-download"),
 		WorkflowRunBackendId:    "792",
 		WorkflowJobRunBackendId: "193",
 	})).AddTokenAuth(token)
@@ -324,7 +324,7 @@ func TestActionsArtifactV4DownloadSingle(t *testing.T) {

 	// confirm artifact upload
 	req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL", toProtoJSON(&actions.GetSignedArtifactURLRequest{
-		Name:                    "artifact",
+		Name:                    "artifact-v4-download",
 		WorkflowRunBackendId:    "792",
 		WorkflowJobRunBackendId: "193",
 	})).
@@ -336,20 +336,20 @@ func TestActionsArtifactV4DownloadSingle(t *testing.T) {

 	req = NewRequest(t, "GET", finalizeResp.SignedUrl)
 	resp = MakeRequest(t, req, http.StatusOK)
-	body := strings.Repeat("A", 1024)
+	body := strings.Repeat("D", 1024)
 	assert.Equal(t, "bytes", resp.Header().Get("accept-ranges"))
 	assert.Equal(t, body, resp.Body.String())

 	// Download artifact via user-facing URL
-	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact")
+	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact-v4-download")
 	resp = MakeRequest(t, req, http.StatusOK)
 	assert.Equal(t, "bytes", resp.Header().Get("accept-ranges"))
 	assert.Equal(t, body, resp.Body.String())

 	// Partial artifact download
-	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact").SetHeader("range", "bytes=0-99")
+	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact-v4-download").SetHeader("range", "bytes=0-99")
 	resp = MakeRequest(t, req, http.StatusPartialContent)
-	body = strings.Repeat("A", 100)
+	body = strings.Repeat("D", 100)
 	assert.Equal(t, "bytes 0-99/1024", resp.Header().Get("content-range"))
 	assert.Equal(t, body, resp.Body.String())
 }
@@ -357,13 +357,13 @@ func TestActionsArtifactV4DownloadSingle(t *testing.T) {
 func TestActionsArtifactV4DownloadRange(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()

-	bstr := strings.Repeat("B", 100)
+	bstr := strings.Repeat("D", 100)
 	body := strings.Repeat("A", 100) + bstr
 	token := uploadArtifact(t, body)

 	// Download (Actions API)
 	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL", toProtoJSON(&actions.GetSignedArtifactURLRequest{
-		Name:                    "artifact",
+		Name:                    "artifact-v4-download",
 		WorkflowRunBackendId:    "792",
 		WorkflowJobRunBackendId: "193",
 	})).
@@ -375,13 +375,13 @@ func TestActionsArtifactV4DownloadRange(t *testing.T) {

 	req = NewRequest(t, "GET", finalizeResp.SignedUrl).SetHeader("range", "bytes=100-199")
 	resp = MakeRequest(t, req, http.StatusPartialContent)
-	assert.Equal(t, "bytes 100-199/200", resp.Header().Get("content-range"))
+	assert.Equal(t, "bytes 100-199/1024", resp.Header().Get("content-range"))
 	assert.Equal(t, bstr, resp.Body.String())

 	// Download (user-facing API)
-	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact").SetHeader("range", "bytes=100-199")
+	req = NewRequest(t, "GET", "/user5/repo4/actions/runs/188/artifacts/artifact-v4-download").SetHeader("range", "bytes=100-199")
 	resp = MakeRequest(t, req, http.StatusPartialContent)
-	assert.Equal(t, "bytes 100-199/200", resp.Header().Get("content-range"))
+	assert.Equal(t, "bytes 100-199/1024", resp.Header().Get("content-range"))
 	assert.Equal(t, bstr, resp.Body.String())
 }
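The two ranged downloads above follow RFC 7233: "Range: bytes=100-199" uses zero-based, inclusive offsets, and the 206 response reports "Content-Range: bytes <first>-<last>/<complete length>". The complete length in the assertions changes from 200 to 1024, evidently because the request is now served from the pre-seeded 1024-byte fixture artifact (the run of "D"s added under tests/testdata below). A small sketch of the arithmetic:

package main

import "fmt"

func main() {
	// bytes=100-199 of a 1024-byte artifact: inclusive bounds, 100 bytes returned.
	first, last, total := 100, 199, 1024
	fmt.Printf("Range: bytes=%d-%d\n", first, last)
	fmt.Printf("Content-Range: bytes %d-%d/%d\n", first, last, total)
	fmt.Printf("payload: %d bytes\n", last-first+1) // 100
}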
@@ -393,7 +393,7 @@ func TestActionsArtifactV4Delete(t *testing.T) {

 	// delete artifact by name
 	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/DeleteArtifact", toProtoJSON(&actions.DeleteArtifactRequest{
-		Name:                    "artifact",
+		Name:                    "artifact-v4-download",
 		WorkflowRunBackendId:    "792",
 		WorkflowJobRunBackendId: "193",
 	})).AddTokenAuth(token)
@@ -224,6 +224,20 @@ func cancelProcesses(t testing.TB, delay time.Duration) {
 	t.Logf("PrepareTestEnv: all processes cancelled within %s", time.Since(start))
 }

+func PrepareArtifactsStorage(t testing.TB) {
+	// prepare actions artifacts directory and files
+	require.NoError(t, storage.Clean(storage.ActionsArtifacts))
+
+	s, err := storage.NewStorage(setting.LocalStorageType, &setting.Storage{
+		Path: filepath.Join(filepath.Dir(setting.AppPath), "tests", "testdata", "data", "artifacts"),
+	})
+	require.NoError(t, err)
+	require.NoError(t, s.IterateObjects("", func(p string, obj storage.Object) error {
+		_, err = storage.Copy(storage.ActionsArtifacts, p, s, p)
+		return err
+	}))
+}
+
 func PrepareTestEnv(t testing.TB, skip ...int) func() {
 	t.Helper()
 	ourSkip := 1
@@ -263,6 +277,9 @@ func PrepareTestEnv(t testing.TB, skip ...int) func() {
 		}
 	}

+	// Initialize actions artifact data
+	PrepareArtifactsStorage(t)
+
 	// load LFS object fixtures
 	// (LFS storage can be on any of several backends, including remote servers, so we init it with the storage API)
 	lfsFixtures, err := storage.NewStorage(setting.LocalStorageType, &setting.Storage{
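The new PrepareArtifactsStorage helper wipes the active artifacts storage and copies every object under tests/testdata/data/artifacts into it, so the tests above can rely on pre-seeded artifacts. The four chunk fixtures added below are plain single-character runs; a hypothetical helper (name and signature are illustrative, not from this commit; assumes "os" and "strings" are imported) for regenerating such a file:

// writeChunkFixture writes a fixture chunk made of one repeated
// character, matching the shape of the files added below.
func writeChunkFixture(path, char string, size int) error {
	return os.WriteFile(path, []byte(strings.Repeat(char, size)), 0o644)
}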
tests/testdata/data/artifacts/26/1/1712166500347189545.chunk (vendored, new file, 1 line)
@@ -0,0 +1 @@
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
tests/testdata/data/artifacts/26/19/1712348022422036662.chunk (vendored, new file, 1 line)
@@ -0,0 +1 @@
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
tests/testdata/data/artifacts/26/20/1712348022423431524.chunk (vendored, new file, 1 line)
@@ -0,0 +1 @@
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
tests/testdata/data/artifacts/27/5/1730330775594233150.chunk (vendored, new file, 1 line)
@@ -0,0 +1 @@
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD