Mirror of https://codeberg.org/forgejo/forgejo.git, synced 2024-11-10 12:15:43 +01:00.
chore(models): rewrite code format. (#14754)
* chore: rewrite format.
* chore: update format
  Signed-off-by: Bo-Yi Wu <appleboy.tw@gmail.com>
* chore: update format
  Signed-off-by: Bo-Yi Wu <appleboy.tw@gmail.com>
* chore: Adjacent parameters with the same type should be grouped together
* chore: update format.
parent 164e35ead3
commit 167b0f46ef

103 changed files with 474 additions and 460 deletions
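The changes are mechanical formatting cleanups: `var x = ...` initializations become `:=` short declarations, empty error structs collapse to `struct{}`, adjacent parameters of the same type are grouped, `//comment` gains a space after the slashes, single-constant `const` lines are merged into one block, and stray blank lines are dropped. Below is a small self-contained Go sketch of the target style; the identifiers in it are invented for illustration and do not appear in the patch.

package main

import "fmt"

// ErrThingDisabled shows the empty-struct cleanup: a two-line `struct { }` body becomes `struct{}`.
type ErrThingDisabled struct{}

func (ErrThingDisabled) Error() string { return "thing is disabled" }

// lookup shows parameter grouping: `owner string, repo string` becomes `owner, repo string`.
func lookup(owner, repo string) string {
	// Short declaration replaces `var parts = make([]string, 0, 2)`.
	parts := make([]string, 0, 2)
	parts = append(parts, owner, repo)
	// Comment style: a space follows the slashes (`// join`, not `//join`).
	return fmt.Sprintf("%s/%s", parts[0], parts[1])
}

func main() {
	fmt.Println(lookup("forgejo", "forgejo"))
	fmt.Println(ErrThingDisabled{}.Error())
}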
|
@ -121,8 +121,8 @@ func (user *User) GetRepositoryAccesses() (map[*Repository]AccessMode, error) {
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
var repos = make(map[*Repository]AccessMode, 10)
|
repos := make(map[*Repository]AccessMode, 10)
|
||||||
var ownerCache = make(map[int64]*User, 10)
|
ownerCache := make(map[int64]*User, 10)
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var repo repoAccess
|
var repo repoAccess
|
||||||
err = rows.Scan(&repo)
|
err = rows.Scan(&repo)
|
||||||
|
|
|
@ -186,7 +186,7 @@ func (a *Action) GetRepoLink() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRepositoryFromMatch returns a *Repository from a username and repo strings
|
// GetRepositoryFromMatch returns a *Repository from a username and repo strings
|
||||||
func GetRepositoryFromMatch(ownerName string, repoName string) (*Repository, error) {
|
func GetRepositoryFromMatch(ownerName, repoName string) (*Repository, error) {
|
||||||
var err error
|
var err error
|
||||||
refRepo, err := GetRepositoryByOwnerAndName(ownerName, repoName)
|
refRepo, err := GetRepositoryByOwnerAndName(ownerName, repoName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -218,7 +218,7 @@ func (a *Action) getCommentLink(e Engine) string {
|
||||||
if len(a.GetIssueInfos()) == 0 {
|
if len(a.GetIssueInfos()) == 0 {
|
||||||
return "#"
|
return "#"
|
||||||
}
|
}
|
||||||
//Return link to issue
|
// Return link to issue
|
||||||
issueIDString := a.GetIssueInfos()[0]
|
issueIDString := a.GetIssueInfos()[0]
|
||||||
issueID, err := strconv.ParseInt(issueIDString, 10, 64)
|
issueID, err := strconv.ParseInt(issueIDString, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -322,7 +322,7 @@ func GetFeeds(opts GetFeedsOptions) ([]*Action, error) {
|
||||||
return actions, nil
|
return actions, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func activityReadable(user *User, doer *User) bool {
|
func activityReadable(user, doer *User) bool {
|
||||||
var doerID int64
|
var doerID int64
|
||||||
if doer != nil {
|
if doer != nil {
|
||||||
doerID = doer.ID
|
doerID = doer.ID
|
||||||
|
|
|
@ -14,11 +14,11 @@ import (
|
||||||
"code.gitea.io/gitea/modules/util"
|
"code.gitea.io/gitea/modules/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
//NoticeType describes the notice type
|
// NoticeType describes the notice type
|
||||||
type NoticeType int
|
type NoticeType int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
//NoticeRepository type
|
// NoticeRepository type
|
||||||
NoticeRepository NoticeType = iota + 1
|
NoticeRepository NoticeType = iota + 1
|
||||||
// NoticeTask type
|
// NoticeTask type
|
||||||
NoticeTask
|
NoticeTask
|
||||||
|
|
|
@ -193,7 +193,7 @@ func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var ids = make([]int64, 0, len(attachments))
|
ids := make([]int64, 0, len(attachments))
|
||||||
for _, a := range attachments {
|
for _, a := range attachments {
|
||||||
ids = append(ids, a.ID)
|
ids = append(ids, a.ID)
|
||||||
}
|
}
|
||||||
|
@ -216,7 +216,6 @@ func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
|
||||||
// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
|
// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
|
||||||
func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
|
func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
|
||||||
attachments, err := GetAttachmentsByIssueID(issueID)
|
attachments, err := GetAttachmentsByIssueID(issueID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -227,7 +226,6 @@ func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
|
||||||
// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
|
// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
|
||||||
func DeleteAttachmentsByComment(commentID int64, remove bool) (int, error) {
|
func DeleteAttachmentsByComment(commentID int64, remove bool) (int, error) {
|
||||||
attachments, err := GetAttachmentsByCommentID(commentID)
|
attachments, err := GetAttachmentsByCommentID(commentID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -263,7 +261,7 @@ func IterateAttachment(f func(attach *Attachment) error) error {
|
||||||
var start int
|
var start int
|
||||||
const batchSize = 100
|
const batchSize = 100
|
||||||
for {
|
for {
|
||||||
var attachments = make([]*Attachment, 0, batchSize)
|
attachments := make([]*Attachment, 0, batchSize)
|
||||||
if err := x.Limit(batchSize, start).Find(&attachments); err != nil {
|
if err := x.Limit(batchSize, start).Find(&attachments); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,12 +17,12 @@ func TestUploadAttachment(t *testing.T) {
|
||||||
|
|
||||||
user := AssertExistsAndLoadBean(t, &User{ID: 1}).(*User)
|
user := AssertExistsAndLoadBean(t, &User{ID: 1}).(*User)
|
||||||
|
|
||||||
var fPath = "./attachment_test.go"
|
fPath := "./attachment_test.go"
|
||||||
f, err := os.Open(fPath)
|
f, err := os.Open(fPath)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
var buf = make([]byte, 1024)
|
buf := make([]byte, 1024)
|
||||||
n, err := f.Read(buf)
|
n, err := f.Read(buf)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
buf = buf[:n]
|
buf = buf[:n]
|
||||||
|
@ -152,7 +152,6 @@ func TestLinkedRepository(t *testing.T) {
|
||||||
assert.Equal(t, tc.expectedRepo.ID, repo.ID)
|
assert.Equal(t, tc.expectedRepo.ID, repo.ID)
|
||||||
}
|
}
|
||||||
assert.Equal(t, tc.expectedUnitType, unitType)
|
assert.Equal(t, tc.expectedUnitType, unitType)
|
||||||
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -176,12 +176,11 @@ func FindRepoRecentCommitStatusContexts(repoID int64, before time.Duration) ([]s
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var contexts = make([]string, 0, len(ids))
|
contexts := make([]string, 0, len(ids))
|
||||||
if len(ids) == 0 {
|
if len(ids) == 0 {
|
||||||
return contexts, nil
|
return contexts, nil
|
||||||
}
|
}
|
||||||
return contexts, x.Select("context").Table("commit_status").In("id", ids).Find(&contexts)
|
return contexts, x.Select("context").Table("commit_status").In("id", ids).Find(&contexts)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCommitStatusOptions holds options for creating a CommitStatus
|
// NewCommitStatusOptions holds options for creating a CommitStatus
|
||||||
|
|
|
@ -72,8 +72,7 @@ func (err ErrNameCharsNotAllowed) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrSSHDisabled represents an "SSH disabled" error.
|
// ErrSSHDisabled represents an "SSH disabled" error.
|
||||||
type ErrSSHDisabled struct {
|
type ErrSSHDisabled struct{}
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrSSHDisabled checks if an error is a ErrSSHDisabled.
|
// IsErrSSHDisabled checks if an error is a ErrSSHDisabled.
|
||||||
func IsErrSSHDisabled(err error) bool {
|
func IsErrSSHDisabled(err error) bool {
|
||||||
|
@ -269,8 +268,7 @@ func (err ErrUserHasOrgs) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrUserNotAllowedCreateOrg represents a "UserNotAllowedCreateOrg" kind of error.
|
// ErrUserNotAllowedCreateOrg represents a "UserNotAllowedCreateOrg" kind of error.
|
||||||
type ErrUserNotAllowedCreateOrg struct {
|
type ErrUserNotAllowedCreateOrg struct{}
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrUserNotAllowedCreateOrg checks if an error is an ErrUserNotAllowedCreateOrg.
|
// IsErrUserNotAllowedCreateOrg checks if an error is an ErrUserNotAllowedCreateOrg.
|
||||||
func IsErrUserNotAllowedCreateOrg(err error) bool {
|
func IsErrUserNotAllowedCreateOrg(err error) bool {
|
||||||
|
@ -603,8 +601,7 @@ func (err ErrAccessTokenNotExist) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrAccessTokenEmpty represents a "AccessTokenEmpty" kind of error.
|
// ErrAccessTokenEmpty represents a "AccessTokenEmpty" kind of error.
|
||||||
type ErrAccessTokenEmpty struct {
|
type ErrAccessTokenEmpty struct{}
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrAccessTokenEmpty checks if an error is a ErrAccessTokenEmpty.
|
// IsErrAccessTokenEmpty checks if an error is a ErrAccessTokenEmpty.
|
||||||
func IsErrAccessTokenEmpty(err error) bool {
|
func IsErrAccessTokenEmpty(err error) bool {
|
||||||
|
|
|
@ -45,7 +45,6 @@ func ListAccountLinks(user *User) ([]*ExternalLoginUser, error) {
|
||||||
err := x.Where("user_id=?", user.ID).
|
err := x.Where("user_id=?", user.ID).
|
||||||
Desc("login_source_id").
|
Desc("login_source_id").
|
||||||
Find(&externalAccounts)
|
Find(&externalAccounts)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -87,7 +86,7 @@ func removeAllAccountLinks(e Engine, user *User) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUserIDByExternalUserID get user id according to provider and userID
|
// GetUserIDByExternalUserID get user id according to provider and userID
|
||||||
func GetUserIDByExternalUserID(provider string, userID string) (int64, error) {
|
func GetUserIDByExternalUserID(provider, userID string) (int64, error) {
|
||||||
var id int64
|
var id int64
|
||||||
_, err := x.Table("external_login_user").
|
_, err := x.Table("external_login_user").
|
||||||
Select("user_id").
|
Select("user_id").
|
||||||
|
@ -147,7 +146,7 @@ type FindExternalUserOptions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (opts FindExternalUserOptions) toConds() builder.Cond {
|
func (opts FindExternalUserOptions) toConds() builder.Cond {
|
||||||
var cond = builder.NewCond()
|
cond := builder.NewCond()
|
||||||
if len(opts.Provider) > 0 {
|
if len(opts.Provider) > 0 {
|
||||||
cond = cond.And(builder.Eq{"provider": opts.Provider})
|
cond = cond.And(builder.Eq{"provider": opts.Provider})
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,6 @@ import (
|
||||||
// GetYamlFixturesAccess returns a string containing the contents
|
// GetYamlFixturesAccess returns a string containing the contents
|
||||||
// for the access table, as recalculated using repo.RecalculateAccesses()
|
// for the access table, as recalculated using repo.RecalculateAccesses()
|
||||||
func GetYamlFixturesAccess() (string, error) {
|
func GetYamlFixturesAccess() (string, error) {
|
||||||
|
|
||||||
repos := make([]*Repository, 0, 50)
|
repos := make([]*Repository, 0, 50)
|
||||||
if err := x.Find(&repos); err != nil {
|
if err := x.Find(&repos); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|
|
@ -44,7 +44,7 @@ type GPGKey struct {
|
||||||
CanCertify bool
|
CanCertify bool
|
||||||
}
|
}
|
||||||
|
|
||||||
//GPGKeyImport the original import of key
|
// GPGKeyImport the original import of key
|
||||||
type GPGKeyImport struct {
|
type GPGKeyImport struct {
|
||||||
KeyID string `xorm:"pk CHAR(16) NOT NULL"`
|
KeyID string `xorm:"pk CHAR(16) NOT NULL"`
|
||||||
Content string `xorm:"TEXT NOT NULL"`
|
Content string `xorm:"TEXT NOT NULL"`
|
||||||
|
@ -118,9 +118,9 @@ func checkArmoredGPGKeyString(content string) (openpgp.EntityList, error) {
|
||||||
return list, nil
|
return list, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//addGPGKey add key, import and subkeys to database
|
// addGPGKey add key, import and subkeys to database
|
||||||
func addGPGKey(e Engine, key *GPGKey, content string) (err error) {
|
func addGPGKey(e Engine, key *GPGKey, content string) (err error) {
|
||||||
//Add GPGKeyImport
|
// Add GPGKeyImport
|
||||||
if _, err = e.Insert(GPGKeyImport{
|
if _, err = e.Insert(GPGKeyImport{
|
||||||
KeyID: key.KeyID,
|
KeyID: key.KeyID,
|
||||||
Content: content,
|
Content: content,
|
||||||
|
@ -140,7 +140,7 @@ func addGPGKey(e Engine, key *GPGKey, content string) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//addGPGSubKey add subkeys to database
|
// addGPGSubKey add subkeys to database
|
||||||
func addGPGSubKey(e Engine, key *GPGKey) (err error) {
|
func addGPGSubKey(e Engine, key *GPGKey) (err error) {
|
||||||
// Save GPG primary key.
|
// Save GPG primary key.
|
||||||
if _, err = e.Insert(key); err != nil {
|
if _, err = e.Insert(key); err != nil {
|
||||||
|
@ -177,7 +177,7 @@ func AddGPGKey(ownerID int64, content string) ([]*GPGKey, error) {
|
||||||
return nil, ErrGPGKeyIDAlreadyUsed{ekey.PrimaryKey.KeyIdString()}
|
return nil, ErrGPGKeyIDAlreadyUsed{ekey.PrimaryKey.KeyIdString()}
|
||||||
}
|
}
|
||||||
|
|
||||||
//Get DB session
|
// Get DB session
|
||||||
|
|
||||||
key, err := parseGPGKey(ownerID, ekey)
|
key, err := parseGPGKey(ownerID, ekey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -192,7 +192,7 @@ func AddGPGKey(ownerID int64, content string) ([]*GPGKey, error) {
|
||||||
return keys, sess.Commit()
|
return keys, sess.Commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
//base64EncPubKey encode public key content to base 64
|
// base64EncPubKey encode public key content to base 64
|
||||||
func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
|
func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
|
||||||
var w bytes.Buffer
|
var w bytes.Buffer
|
||||||
err := pubkey.Serialize(&w)
|
err := pubkey.Serialize(&w)
|
||||||
|
@ -202,18 +202,18 @@ func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
|
||||||
return base64.StdEncoding.EncodeToString(w.Bytes()), nil
|
return base64.StdEncoding.EncodeToString(w.Bytes()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//base64DecPubKey decode public key content from base 64
|
// base64DecPubKey decode public key content from base 64
|
||||||
func base64DecPubKey(content string) (*packet.PublicKey, error) {
|
func base64DecPubKey(content string) (*packet.PublicKey, error) {
|
||||||
b, err := readerFromBase64(content)
|
b, err := readerFromBase64(content)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
//Read key
|
// Read key
|
||||||
p, err := packet.Read(b)
|
p, err := packet.Read(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
//Check type
|
// Check type
|
||||||
pkey, ok := p.(*packet.PublicKey)
|
pkey, ok := p.(*packet.PublicKey)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("key is not a public key")
|
return nil, fmt.Errorf("key is not a public key")
|
||||||
|
@ -221,7 +221,7 @@ func base64DecPubKey(content string) (*packet.PublicKey, error) {
|
||||||
return pkey, nil
|
return pkey, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//GPGKeyToEntity retrieve the imported key and the traducted entity
|
// GPGKeyToEntity retrieve the imported key and the traducted entity
|
||||||
func GPGKeyToEntity(k *GPGKey) (*openpgp.Entity, error) {
|
func GPGKeyToEntity(k *GPGKey) (*openpgp.Entity, error) {
|
||||||
impKey, err := GetGPGImportByKeyID(k.KeyID)
|
impKey, err := GetGPGImportByKeyID(k.KeyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -234,7 +234,7 @@ func GPGKeyToEntity(k *GPGKey) (*openpgp.Entity, error) {
|
||||||
return keys[0], err
|
return keys[0], err
|
||||||
}
|
}
|
||||||
|
|
||||||
//parseSubGPGKey parse a sub Key
|
// parseSubGPGKey parse a sub Key
|
||||||
func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, expiry time.Time) (*GPGKey, error) {
|
func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, expiry time.Time) (*GPGKey, error) {
|
||||||
content, err := base64EncPubKey(pubkey)
|
content, err := base64EncPubKey(pubkey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -254,10 +254,10 @@ func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, e
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//getExpiryTime extract the expire time of primary key based on sig
|
// getExpiryTime extract the expire time of primary key based on sig
|
||||||
func getExpiryTime(e *openpgp.Entity) time.Time {
|
func getExpiryTime(e *openpgp.Entity) time.Time {
|
||||||
expiry := time.Time{}
|
expiry := time.Time{}
|
||||||
//Extract self-sign for expire date based on : https://github.com/golang/crypto/blob/master/openpgp/keys.go#L165
|
// Extract self-sign for expire date based on : https://github.com/golang/crypto/blob/master/openpgp/keys.go#L165
|
||||||
var selfSig *packet.Signature
|
var selfSig *packet.Signature
|
||||||
for _, ident := range e.Identities {
|
for _, ident := range e.Identities {
|
||||||
if selfSig == nil {
|
if selfSig == nil {
|
||||||
|
@ -273,12 +273,12 @@ func getExpiryTime(e *openpgp.Entity) time.Time {
|
||||||
return expiry
|
return expiry
|
||||||
}
|
}
|
||||||
|
|
||||||
//parseGPGKey parse a PrimaryKey entity (primary key + subs keys + self-signature)
|
// parseGPGKey parse a PrimaryKey entity (primary key + subs keys + self-signature)
|
||||||
func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
|
func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
|
||||||
pubkey := e.PrimaryKey
|
pubkey := e.PrimaryKey
|
||||||
expiry := getExpiryTime(e)
|
expiry := getExpiryTime(e)
|
||||||
|
|
||||||
//Parse Subkeys
|
// Parse Subkeys
|
||||||
subkeys := make([]*GPGKey, len(e.Subkeys))
|
subkeys := make([]*GPGKey, len(e.Subkeys))
|
||||||
for i, k := range e.Subkeys {
|
for i, k := range e.Subkeys {
|
||||||
subs, err := parseSubGPGKey(ownerID, pubkey.KeyIdString(), k.PublicKey, expiry)
|
subs, err := parseSubGPGKey(ownerID, pubkey.KeyIdString(), k.PublicKey, expiry)
|
||||||
|
@ -288,7 +288,7 @@ func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
|
||||||
subkeys[i] = subs
|
subkeys[i] = subs
|
||||||
}
|
}
|
||||||
|
|
||||||
//Check emails
|
// Check emails
|
||||||
userEmails, err := GetEmailAddresses(ownerID)
|
userEmails, err := GetEmailAddresses(ownerID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -308,7 +308,7 @@ func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//In the case no email as been found
|
// In the case no email as been found
|
||||||
if len(emails) == 0 {
|
if len(emails) == 0 {
|
||||||
failedEmails := make([]string, 0, len(e.Identities))
|
failedEmails := make([]string, 0, len(e.Identities))
|
||||||
for _, ident := range e.Identities {
|
for _, ident := range e.Identities {
|
||||||
|
@ -340,9 +340,9 @@ func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
|
||||||
// deleteGPGKey does the actual key deletion
|
// deleteGPGKey does the actual key deletion
|
||||||
func deleteGPGKey(e *xorm.Session, keyID string) (int64, error) {
|
func deleteGPGKey(e *xorm.Session, keyID string) (int64, error) {
|
||||||
if keyID == "" {
|
if keyID == "" {
|
||||||
return 0, fmt.Errorf("empty KeyId forbidden") //Should never happen but just to be sure
|
return 0, fmt.Errorf("empty KeyId forbidden") // Should never happen but just to be sure
|
||||||
}
|
}
|
||||||
//Delete imported key
|
// Delete imported key
|
||||||
n, err := e.Where("key_id=?", keyID).Delete(new(GPGKeyImport))
|
n, err := e.Where("key_id=?", keyID).Delete(new(GPGKeyImport))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
|
@ -452,11 +452,11 @@ func extractSignature(s string) (*packet.Signature, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
|
func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
|
||||||
//Check if key can sign
|
// Check if key can sign
|
||||||
if !k.CanSign {
|
if !k.CanSign {
|
||||||
return fmt.Errorf("key can not sign")
|
return fmt.Errorf("key can not sign")
|
||||||
}
|
}
|
||||||
//Decode key
|
// Decode key
|
||||||
pkey, err := base64DecPubKey(k.Content)
|
pkey, err := base64DecPubKey(k.Content)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -465,9 +465,9 @@ func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey, committer, signer *User, email string) *CommitVerification {
|
func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey, committer, signer *User, email string) *CommitVerification {
|
||||||
//Generating hash of commit
|
// Generating hash of commit
|
||||||
hash, err := populateHash(sig.Hash, []byte(payload))
|
hash, err := populateHash(sig.Hash, []byte(payload))
|
||||||
if err != nil { //Skipping failed to generate hash
|
if err != nil { // Skipping failed to generate hash
|
||||||
log.Error("PopulateHash: %v", err)
|
log.Error("PopulateHash: %v", err)
|
||||||
return &CommitVerification{
|
return &CommitVerification{
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
|
@ -477,7 +477,7 @@ func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey, committer,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := verifySign(sig, hash, k); err == nil {
|
if err := verifySign(sig, hash, k); err == nil {
|
||||||
return &CommitVerification{ //Everything is ok
|
return &CommitVerification{ // Everything is ok
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
Verified: true,
|
Verified: true,
|
||||||
Reason: fmt.Sprintf("%s / %s", signer.Name, k.KeyID),
|
Reason: fmt.Sprintf("%s / %s", signer.Name, k.KeyID),
|
||||||
|
@ -495,7 +495,7 @@ func hashAndVerifyWithSubKeys(sig *packet.Signature, payload string, k *GPGKey,
|
||||||
return commitVerification
|
return commitVerification
|
||||||
}
|
}
|
||||||
|
|
||||||
//And test also SubsKey
|
// And test also SubsKey
|
||||||
for _, sk := range k.SubsKey {
|
for _, sk := range k.SubsKey {
|
||||||
commitVerification := hashAndVerify(sig, payload, sk, committer, signer, email)
|
commitVerification := hashAndVerify(sig, payload, sk, committer, signer, email)
|
||||||
if commitVerification != nil {
|
if commitVerification != nil {
|
||||||
|
@ -620,9 +620,9 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
var committer *User
|
var committer *User
|
||||||
if c.Committer != nil {
|
if c.Committer != nil {
|
||||||
var err error
|
var err error
|
||||||
//Find Committer account
|
// Find Committer account
|
||||||
committer, err = GetUserByEmail(c.Committer.Email) //This finds the user by primary email or activated email so commit will not be valid if email is not
|
committer, err = GetUserByEmail(c.Committer.Email) // This finds the user by primary email or activated email so commit will not be valid if email is not
|
||||||
if err != nil { //Skipping not user for commiter
|
if err != nil { // Skipping not user for commiter
|
||||||
committer = &User{
|
committer = &User{
|
||||||
Name: c.Committer.Name,
|
Name: c.Committer.Name,
|
||||||
Email: c.Committer.Email,
|
Email: c.Committer.Email,
|
||||||
|
@ -645,14 +645,14 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
if c.Signature == nil {
|
if c.Signature == nil {
|
||||||
return &CommitVerification{
|
return &CommitVerification{
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
Verified: false, //Default value
|
Verified: false, // Default value
|
||||||
Reason: "gpg.error.not_signed_commit", //Default value
|
Reason: "gpg.error.not_signed_commit", // Default value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//Parsing signature
|
// Parsing signature
|
||||||
sig, err := extractSignature(c.Signature.Signature)
|
sig, err := extractSignature(c.Signature.Signature)
|
||||||
if err != nil { //Skipping failed to extract sign
|
if err != nil { // Skipping failed to extract sign
|
||||||
log.Error("SignatureRead err: %v", err)
|
log.Error("SignatureRead err: %v", err)
|
||||||
return &CommitVerification{
|
return &CommitVerification{
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
|
@ -688,7 +688,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
// Now try to associate the signature with the committer, if present
|
// Now try to associate the signature with the committer, if present
|
||||||
if committer.ID != 0 {
|
if committer.ID != 0 {
|
||||||
keys, err := ListGPGKeys(committer.ID, ListOptions{})
|
keys, err := ListGPGKeys(committer.ID, ListOptions{})
|
||||||
if err != nil { //Skipping failed to get gpg keys of user
|
if err != nil { // Skipping failed to get gpg keys of user
|
||||||
log.Error("ListGPGKeys: %v", err)
|
log.Error("ListGPGKeys: %v", err)
|
||||||
return &CommitVerification{
|
return &CommitVerification{
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
|
@ -698,7 +698,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, k := range keys {
|
for _, k := range keys {
|
||||||
//Pre-check (& optimization) that emails attached to key can be attached to the commiter email and can validate
|
// Pre-check (& optimization) that emails attached to key can be attached to the commiter email and can validate
|
||||||
canValidate := false
|
canValidate := false
|
||||||
email := ""
|
email := ""
|
||||||
for _, e := range k.Emails {
|
for _, e := range k.Emails {
|
||||||
|
@ -709,7 +709,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !canValidate {
|
if !canValidate {
|
||||||
continue //Skip this key
|
continue // Skip this key
|
||||||
}
|
}
|
||||||
|
|
||||||
commitVerification := hashAndVerifyWithSubKeys(sig, c.Signature.Payload, k, committer, committer, email)
|
commitVerification := hashAndVerifyWithSubKeys(sig, c.Signature.Payload, k, committer, committer, email)
|
||||||
|
@ -753,7 +753,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &CommitVerification{ //Default at this stage
|
return &CommitVerification{ // Default at this stage
|
||||||
CommittingUser: committer,
|
CommittingUser: committer,
|
||||||
Verified: false,
|
Verified: false,
|
||||||
Warning: defaultReason != NoKeyFound,
|
Warning: defaultReason != NoKeyFound,
|
||||||
|
|
|
@ -47,7 +47,7 @@ MkM/fdpyc2hY7Dl/+qFmN5MG5yGmMpQcX+RNNR222ibNC1D3wg==
|
||||||
|
|
||||||
key, err := checkArmoredGPGKeyString(testGPGArmor)
|
key, err := checkArmoredGPGKeyString(testGPGArmor)
|
||||||
assert.NoError(t, err, "Could not parse a valid GPG public armored rsa key", key)
|
assert.NoError(t, err, "Could not parse a valid GPG public armored rsa key", key)
|
||||||
//TODO verify value of key
|
// TODO verify value of key
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCheckArmoredbrainpoolP256r1GPGKeyString(t *testing.T) {
|
func TestCheckArmoredbrainpoolP256r1GPGKeyString(t *testing.T) {
|
||||||
|
@ -68,7 +68,7 @@ OyjLLnFQiVmq7kEA/0z0CQe3ZQiQIq5zrs7Nh1XRkFAo8GlU/SGC9XFFi722
|
||||||
|
|
||||||
key, err := checkArmoredGPGKeyString(testGPGArmor)
|
key, err := checkArmoredGPGKeyString(testGPGArmor)
|
||||||
assert.NoError(t, err, "Could not parse a valid GPG public armored brainpoolP256r1 key", key)
|
assert.NoError(t, err, "Could not parse a valid GPG public armored brainpoolP256r1 key", key)
|
||||||
//TODO verify value of key
|
// TODO verify value of key
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExtractSignature(t *testing.T) {
|
func TestExtractSignature(t *testing.T) {
|
||||||
|
@ -167,19 +167,19 @@ committer Antoine GIRARD <sapk@sapk.fr> 1489013107 +0100
|
||||||
|
|
||||||
Unknown GPG key with good email
|
Unknown GPG key with good email
|
||||||
`
|
`
|
||||||
//Reading Sign
|
// Reading Sign
|
||||||
goodSig, err := extractSignature(testGoodSigArmor)
|
goodSig, err := extractSignature(testGoodSigArmor)
|
||||||
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testGoodSigArmor)
|
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testGoodSigArmor)
|
||||||
badSig, err := extractSignature(testBadSigArmor)
|
badSig, err := extractSignature(testBadSigArmor)
|
||||||
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testBadSigArmor)
|
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testBadSigArmor)
|
||||||
|
|
||||||
//Generating hash of commit
|
// Generating hash of commit
|
||||||
goodHash, err := populateHash(goodSig.Hash, []byte(testGoodPayload))
|
goodHash, err := populateHash(goodSig.Hash, []byte(testGoodPayload))
|
||||||
assert.NoError(t, err, "Could not generate a valid hash of payload", testGoodPayload)
|
assert.NoError(t, err, "Could not generate a valid hash of payload", testGoodPayload)
|
||||||
badHash, err := populateHash(badSig.Hash, []byte(testBadPayload))
|
badHash, err := populateHash(badSig.Hash, []byte(testBadPayload))
|
||||||
assert.NoError(t, err, "Could not generate a valid hash of payload", testBadPayload)
|
assert.NoError(t, err, "Could not generate a valid hash of payload", testBadPayload)
|
||||||
|
|
||||||
//Verify
|
// Verify
|
||||||
err = verifySign(goodSig, goodHash, key)
|
err = verifySign(goodSig, goodHash, key)
|
||||||
assert.NoError(t, err, "Could not validate a good signature")
|
assert.NoError(t, err, "Could not validate a good signature")
|
||||||
err = verifySign(badSig, badHash, key)
|
err = verifySign(badSig, badHash, key)
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
package models
|
package models
|
||||||
|
|
||||||
func keysInt64(m map[int64]struct{}) []int64 {
|
func keysInt64(m map[int64]struct{}) []int64 {
|
||||||
var keys = make([]int64, 0, len(m))
|
keys := make([]int64, 0, len(m))
|
||||||
for k := range m {
|
for k := range m {
|
||||||
keys = append(keys, k)
|
keys = append(keys, k)
|
||||||
}
|
}
|
||||||
|
@ -13,7 +13,7 @@ func keysInt64(m map[int64]struct{}) []int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func valuesRepository(m map[int64]*Repository) []*Repository {
|
func valuesRepository(m map[int64]*Repository) []*Repository {
|
||||||
var values = make([]*Repository, 0, len(m))
|
values := make([]*Repository, 0, len(m))
|
||||||
for _, v := range m {
|
for _, v := range m {
|
||||||
values = append(values, v)
|
values = append(values, v)
|
||||||
}
|
}
|
||||||
|
@ -21,7 +21,7 @@ func valuesRepository(m map[int64]*Repository) []*Repository {
|
||||||
}
|
}
|
||||||
|
|
||||||
func valuesUser(m map[int64]*User) []*User {
|
func valuesUser(m map[int64]*User) []*User {
|
||||||
var values = make([]*User, 0, len(m))
|
values := make([]*User, 0, len(m))
|
||||||
for _, v := range m {
|
for _, v := range m {
|
||||||
values = append(values, v)
|
values = append(values, v)
|
||||||
}
|
}
|
||||||
|
|
|
@ -74,5 +74,4 @@ func FullPushingEnvironment(author, committer *User, repo *Repository, repoName
|
||||||
}
|
}
|
||||||
|
|
||||||
return environ
|
return environ
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -77,9 +77,11 @@ var (
|
||||||
issueTasksDonePat *regexp.Regexp
|
issueTasksDonePat *regexp.Regexp
|
||||||
)
|
)
|
||||||
|
|
||||||
const issueTasksRegexpStr = `(^\s*[-*]\s\[[\sxX]\]\s.)|(\n\s*[-*]\s\[[\sxX]\]\s.)`
|
const (
|
||||||
const issueTasksDoneRegexpStr = `(^\s*[-*]\s\[[xX]\]\s.)|(\n\s*[-*]\s\[[xX]\]\s.)`
|
issueTasksRegexpStr = `(^\s*[-*]\s\[[\sxX]\]\s.)|(\n\s*[-*]\s\[[\sxX]\]\s.)`
|
||||||
const issueMaxDupIndexAttempts = 3
|
issueTasksDoneRegexpStr = `(^\s*[-*]\s\[[xX]\]\s.)|(\n\s*[-*]\s\[[xX]\]\s.)`
|
||||||
|
issueMaxDupIndexAttempts = 3
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
issueTasksPat = regexp.MustCompile(issueTasksRegexpStr)
|
issueTasksPat = regexp.MustCompile(issueTasksRegexpStr)
|
||||||
|
@ -714,7 +716,7 @@ func (issue *Issue) ChangeTitle(doer *User, oldTitle string) (err error) {
|
||||||
return fmt.Errorf("loadRepo: %v", err)
|
return fmt.Errorf("loadRepo: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: CommentTypeChangeTitle,
|
Type: CommentTypeChangeTitle,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: issue.Repo,
|
Repo: issue.Repo,
|
||||||
|
@ -759,7 +761,7 @@ func AddDeletePRBranchComment(doer *User, repo *Repository, issueID int64, branc
|
||||||
if err := sess.Begin(); err != nil {
|
if err := sess.Begin(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: CommentTypeDeleteBranch,
|
Type: CommentTypeDeleteBranch,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: repo,
|
Repo: repo,
|
||||||
|
@ -914,7 +916,7 @@ func newIssue(e *xorm.Session, doer *User, opts NewIssueOptions) (err error) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: CommentTypeMilestone,
|
Type: CommentTypeMilestone,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: opts.Repo,
|
Repo: opts.Repo,
|
||||||
|
@ -1083,7 +1085,7 @@ func getIssuesByIDs(e Engine, issueIDs []int64) ([]*Issue, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIssueIDsByRepoID(e Engine, repoID int64) ([]int64, error) {
|
func getIssueIDsByRepoID(e Engine, repoID int64) ([]int64, error) {
|
||||||
var ids = make([]int64, 0, 10)
|
ids := make([]int64, 0, 10)
|
||||||
err := e.Table("issue").Where("repo_id = ?", repoID).Find(&ids)
|
err := e.Table("issue").Where("repo_id = ?", repoID).Find(&ids)
|
||||||
return ids, err
|
return ids, err
|
||||||
}
|
}
|
||||||
|
@ -1689,7 +1691,7 @@ func GetUserIssueStats(opts UserIssueStatsOptions) (*IssueStats, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRepoIssueStats returns number of open and closed repository issues by given filter mode.
|
// GetRepoIssueStats returns number of open and closed repository issues by given filter mode.
|
||||||
func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen int64, numClosed int64) {
|
func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen, numClosed int64) {
|
||||||
countSession := func(isClosed, isPull bool, repoID int64) *xorm.Session {
|
countSession := func(isClosed, isPull bool, repoID int64) *xorm.Session {
|
||||||
sess := x.
|
sess := x.
|
||||||
Where("is_closed = ?", isClosed).
|
Where("is_closed = ?", isClosed).
|
||||||
|
@ -1719,10 +1721,10 @@ func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen
|
||||||
|
|
||||||
// SearchIssueIDsByKeyword search issues on database
|
// SearchIssueIDsByKeyword search issues on database
|
||||||
func SearchIssueIDsByKeyword(kw string, repoIDs []int64, limit, start int) (int64, []int64, error) {
|
func SearchIssueIDsByKeyword(kw string, repoIDs []int64, limit, start int) (int64, []int64, error) {
|
||||||
var repoCond = builder.In("repo_id", repoIDs)
|
repoCond := builder.In("repo_id", repoIDs)
|
||||||
var subQuery = builder.Select("id").From("issue").Where(repoCond)
|
subQuery := builder.Select("id").From("issue").Where(repoCond)
|
||||||
kw = strings.ToUpper(kw)
|
kw = strings.ToUpper(kw)
|
||||||
var cond = builder.And(
|
cond := builder.And(
|
||||||
repoCond,
|
repoCond,
|
||||||
builder.Or(
|
builder.Or(
|
||||||
builder.Like{"UPPER(name)", kw},
|
builder.Like{"UPPER(name)", kw},
|
||||||
|
@ -1738,8 +1740,8 @@ func SearchIssueIDsByKeyword(kw string, repoIDs []int64, limit, start int) (int6
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
var ids = make([]int64, 0, limit)
|
ids := make([]int64, 0, limit)
|
||||||
var res = make([]struct {
|
res := make([]struct {
|
||||||
ID int64
|
ID int64
|
||||||
UpdatedUnix int64
|
UpdatedUnix int64
|
||||||
}, 0, limit)
|
}, 0, limit)
|
||||||
|
@ -1790,7 +1792,7 @@ func UpdateIssueByAPI(issue *Issue, doer *User) (statusChangeComment *Comment, t
|
||||||
|
|
||||||
titleChanged = currentIssue.Title != issue.Title
|
titleChanged = currentIssue.Title != issue.Title
|
||||||
if titleChanged {
|
if titleChanged {
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: CommentTypeChangeTitle,
|
Type: CommentTypeChangeTitle,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: issue.Repo,
|
Repo: issue.Repo,
|
||||||
|
@ -1819,7 +1821,6 @@ func UpdateIssueByAPI(issue *Issue, doer *User) (statusChangeComment *Comment, t
|
||||||
|
|
||||||
// UpdateIssueDeadline updates an issue deadline and adds comments. Setting a deadline to 0 means deleting it.
|
// UpdateIssueDeadline updates an issue deadline and adds comments. Setting a deadline to 0 means deleting it.
|
||||||
func UpdateIssueDeadline(issue *Issue, deadlineUnix timeutil.TimeStamp, doer *User) (err error) {
|
func UpdateIssueDeadline(issue *Issue, deadlineUnix timeutil.TimeStamp, doer *User) (err error) {
|
||||||
|
|
||||||
// if the deadline hasn't changed do nothing
|
// if the deadline hasn't changed do nothing
|
||||||
if issue.DeadlineUnix == deadlineUnix {
|
if issue.DeadlineUnix == deadlineUnix {
|
||||||
return nil
|
return nil
|
||||||
|
@ -1879,7 +1880,7 @@ func (issue *Issue) getBlockedByDependencies(e Engine) (issueDeps []*DependencyI
|
||||||
Join("INNER", "repository", "repository.id = issue.repo_id").
|
Join("INNER", "repository", "repository.id = issue.repo_id").
|
||||||
Join("INNER", "issue_dependency", "issue_dependency.dependency_id = issue.id").
|
Join("INNER", "issue_dependency", "issue_dependency.dependency_id = issue.id").
|
||||||
Where("issue_id = ?", issue.ID).
|
Where("issue_id = ?", issue.ID).
|
||||||
//sort by repo id then created date, with the issues of the same repo at the beginning of the list
|
// sort by repo id then created date, with the issues of the same repo at the beginning of the list
|
||||||
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
|
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
|
||||||
Find(&issueDeps)
|
Find(&issueDeps)
|
||||||
}
|
}
|
||||||
|
@ -1891,7 +1892,7 @@ func (issue *Issue) getBlockingDependencies(e Engine) (issueDeps []*DependencyIn
|
||||||
Join("INNER", "repository", "repository.id = issue.repo_id").
|
Join("INNER", "repository", "repository.id = issue.repo_id").
|
||||||
Join("INNER", "issue_dependency", "issue_dependency.issue_id = issue.id").
|
Join("INNER", "issue_dependency", "issue_dependency.issue_id = issue.id").
|
||||||
Where("dependency_id = ?", issue.ID).
|
Where("dependency_id = ?", issue.ID).
|
||||||
//sort by repo id then created date, with the issues of the same repo at the beginning of the list
|
// sort by repo id then created date, with the issues of the same repo at the beginning of the list
|
||||||
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
|
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
|
||||||
Find(&issueDeps)
|
Find(&issueDeps)
|
||||||
}
|
}
|
||||||
|
|
|
@ -119,7 +119,7 @@ func (issue *Issue) toggleAssignee(sess *xorm.Session, doer *User, assigneeID in
|
||||||
return false, nil, fmt.Errorf("loadRepo: %v", err)
|
return false, nil, fmt.Errorf("loadRepo: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: CommentTypeAssignees,
|
Type: CommentTypeAssignees,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: issue.Repo,
|
Repo: issue.Repo,
|
||||||
|
@ -143,7 +143,6 @@ func (issue *Issue) toggleAssignee(sess *xorm.Session, doer *User, assigneeID in
|
||||||
|
|
||||||
// toggles user assignee state in database
|
// toggles user assignee state in database
|
||||||
func toggleUserAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (removed bool, err error) {
|
func toggleUserAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (removed bool, err error) {
|
||||||
|
|
||||||
// Check if the user exists
|
// Check if the user exists
|
||||||
assignee, err := getUserByID(e, assigneeID)
|
assignee, err := getUserByID(e, assigneeID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -180,7 +179,6 @@ func toggleUserAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (remove
|
||||||
|
|
||||||
// MakeIDsFromAPIAssigneesToAdd returns an array with all assignee IDs
|
// MakeIDsFromAPIAssigneesToAdd returns an array with all assignee IDs
|
||||||
func MakeIDsFromAPIAssigneesToAdd(oneAssignee string, multipleAssignees []string) (assigneeIDs []int64, err error) {
|
func MakeIDsFromAPIAssigneesToAdd(oneAssignee string, multipleAssignees []string) (assigneeIDs []int64, err error) {
|
||||||
|
|
||||||
var requestAssignees []string
|
var requestAssignees []string
|
||||||
|
|
||||||
// Keeping the old assigning method for compatibility reasons
|
// Keeping the old assigning method for compatibility reasons
|
||||||
|
@ -188,7 +186,7 @@ func MakeIDsFromAPIAssigneesToAdd(oneAssignee string, multipleAssignees []string
|
||||||
requestAssignees = append(requestAssignees, oneAssignee)
|
requestAssignees = append(requestAssignees, oneAssignee)
|
||||||
}
|
}
|
||||||
|
|
||||||
//Prevent empty assignees
|
// Prevent empty assignees
|
||||||
if len(multipleAssignees) > 0 && multipleAssignees[0] != "" {
|
if len(multipleAssignees) > 0 && multipleAssignees[0] != "" {
|
||||||
requestAssignees = append(requestAssignees, multipleAssignees...)
|
requestAssignees = append(requestAssignees, multipleAssignees...)
|
||||||
}
|
}
|
||||||
|
|
|
@ -267,7 +267,6 @@ func (c *Comment) AfterDelete() {
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := DeleteAttachmentsByComment(c.ID, true)
|
_, err := DeleteAttachmentsByComment(c.ID, true)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Could not delete files for comment %d on issue #%d: %s", c.ID, c.IssueID, err)
|
log.Info("Could not delete files for comment %d on issue #%d: %s", c.ID, c.IssueID, err)
|
||||||
}
|
}
|
||||||
|
@ -391,7 +390,6 @@ func (c *Comment) LoadLabel() error {
|
||||||
|
|
||||||
// LoadProject if comment.Type is CommentTypeProject, then load project.
|
// LoadProject if comment.Type is CommentTypeProject, then load project.
|
||||||
func (c *Comment) LoadProject() error {
|
func (c *Comment) LoadProject() error {
|
||||||
|
|
||||||
if c.OldProjectID > 0 {
|
if c.OldProjectID > 0 {
|
||||||
var oldProject Project
|
var oldProject Project
|
||||||
has, err := x.ID(c.OldProjectID).Get(&oldProject)
|
has, err := x.ID(c.OldProjectID).Get(&oldProject)
|
||||||
|
@ -813,7 +811,7 @@ func createDeadlineComment(e *xorm.Session, doer *User, issue *Issue, newDeadlin
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: commentType,
|
Type: commentType,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: issue.Repo,
|
Repo: issue.Repo,
|
||||||
|
@ -828,7 +826,7 @@ func createDeadlineComment(e *xorm.Session, doer *User, issue *Issue, newDeadlin
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates issue dependency comment
|
// Creates issue dependency comment
|
||||||
func createIssueDependencyComment(e *xorm.Session, doer *User, issue *Issue, dependentIssue *Issue, add bool) (err error) {
|
func createIssueDependencyComment(e *xorm.Session, doer *User, issue, dependentIssue *Issue, add bool) (err error) {
|
||||||
cType := CommentTypeAddDependency
|
cType := CommentTypeAddDependency
|
||||||
if !add {
|
if !add {
|
||||||
cType = CommentTypeRemoveDependency
|
cType = CommentTypeRemoveDependency
|
||||||
|
@ -838,7 +836,7 @@ func createIssueDependencyComment(e *xorm.Session, doer *User, issue *Issue, dep
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make two comments, one in each issue
|
// Make two comments, one in each issue
|
||||||
var opts = &CreateCommentOptions{
|
opts := &CreateCommentOptions{
|
||||||
Type: cType,
|
Type: cType,
|
||||||
Doer: doer,
|
Doer: doer,
|
||||||
Repo: issue.Repo,
|
Repo: issue.Repo,
|
||||||
|
@ -977,7 +975,7 @@ type FindCommentsOptions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (opts *FindCommentsOptions) toConds() builder.Cond {
|
func (opts *FindCommentsOptions) toConds() builder.Cond {
|
||||||
var cond = builder.NewCond()
|
cond := builder.NewCond()
|
||||||
if opts.RepoID > 0 {
|
if opts.RepoID > 0 {
|
||||||
cond = cond.And(builder.Eq{"issue.repo_id": opts.RepoID})
|
cond = cond.And(builder.Eq{"issue.repo_id": opts.RepoID})
|
||||||
}
|
}
|
||||||
|
@ -1149,7 +1147,7 @@ func findCodeComments(e Engine, opts FindCommentsOptions, issue *Issue, currentU
|
||||||
|
|
||||||
// Find all reviews by ReviewID
|
// Find all reviews by ReviewID
|
||||||
reviews := make(map[int64]*Review)
|
reviews := make(map[int64]*Review)
|
||||||
var ids = make([]int64, 0, len(comments))
|
ids := make([]int64, 0, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if comment.ReviewID != 0 {
|
if comment.ReviewID != 0 {
|
||||||
ids = append(ids, comment.ReviewID)
|
ids = append(ids, comment.ReviewID)
|
||||||
|
|
|
@ -24,9 +24,9 @@ func (comments CommentList) loadPosters(e Engine) error {
|
||||||
|
|
||||||
posterIDs := comments.getPosterIDs()
|
posterIDs := comments.getPosterIDs()
|
||||||
posterMaps := make(map[int64]*User, len(posterIDs))
|
posterMaps := make(map[int64]*User, len(posterIDs))
|
||||||
var left = len(posterIDs)
|
left := len(posterIDs)
|
||||||
for left > 0 {
|
for left > 0 {
|
||||||
var limit = defaultMaxInSize
|
limit := defaultMaxInSize
|
||||||
if left < limit {
|
if left < limit {
|
||||||
limit = left
|
limit = left
|
||||||
}
|
}
|
||||||
|
@ -53,7 +53,7 @@ func (comments CommentList) loadPosters(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (comments CommentList) getCommentIDs() []int64 {
|
func (comments CommentList) getCommentIDs() []int64 {
|
||||||
var ids = make([]int64, 0, len(comments))
|
ids := make([]int64, 0, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
ids = append(ids, comment.ID)
|
ids = append(ids, comment.ID)
|
||||||
}
|
}
|
||||||
|
@ -61,7 +61,7 @@ func (comments CommentList) getCommentIDs() []int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (comments CommentList) getLabelIDs() []int64 {
|
func (comments CommentList) getLabelIDs() []int64 {
|
||||||
var ids = make(map[int64]struct{}, len(comments))
|
ids := make(map[int64]struct{}, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if _, ok := ids[comment.LabelID]; !ok {
|
if _, ok := ids[comment.LabelID]; !ok {
|
||||||
ids[comment.LabelID] = struct{}{}
|
ids[comment.LabelID] = struct{}{}
|
||||||
|
@ -75,11 +75,11 @@ func (comments CommentList) loadLabels(e Engine) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var labelIDs = comments.getLabelIDs()
|
labelIDs := comments.getLabelIDs()
|
||||||
var commentLabels = make(map[int64]*Label, len(labelIDs))
|
commentLabels := make(map[int64]*Label, len(labelIDs))
|
||||||
var left = len(labelIDs)
|
left := len(labelIDs)
|
||||||
for left > 0 {
|
for left > 0 {
|
||||||
var limit = defaultMaxInSize
|
limit := defaultMaxInSize
|
||||||
if left < limit {
|
if left < limit {
|
||||||
limit = left
|
limit = left
|
||||||
}
|
}
|
||||||
|
@ -111,7 +111,7 @@ func (comments CommentList) loadLabels(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (comments CommentList) getMilestoneIDs() []int64 {
|
func (comments CommentList) getMilestoneIDs() []int64 {
|
||||||
var ids = make(map[int64]struct{}, len(comments))
|
ids := make(map[int64]struct{}, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if _, ok := ids[comment.MilestoneID]; !ok {
|
if _, ok := ids[comment.MilestoneID]; !ok {
|
||||||
ids[comment.MilestoneID] = struct{}{}
|
ids[comment.MilestoneID] = struct{}{}
|
||||||
|
@ -131,9 +131,9 @@ func (comments CommentList) loadMilestones(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
|
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
|
||||||
var left = len(milestoneIDs)
|
left := len(milestoneIDs)
|
||||||
for left > 0 {
|
for left > 0 {
|
||||||
var limit = defaultMaxInSize
|
limit := defaultMaxInSize
|
||||||
if left < limit {
|
if left < limit {
|
||||||
limit = left
|
limit = left
|
||||||
}
|
}
|
||||||
|
@ -154,7 +154,7 @@ func (comments CommentList) loadMilestones(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (comments CommentList) getOldMilestoneIDs() []int64 {
|
func (comments CommentList) getOldMilestoneIDs() []int64 {
|
||||||
var ids = make(map[int64]struct{}, len(comments))
|
ids := make(map[int64]struct{}, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if _, ok := ids[comment.OldMilestoneID]; !ok {
|
if _, ok := ids[comment.OldMilestoneID]; !ok {
|
||||||
ids[comment.OldMilestoneID] = struct{}{}
|
ids[comment.OldMilestoneID] = struct{}{}
|
||||||
|
@ -174,9 +174,9 @@ func (comments CommentList) loadOldMilestones(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
|
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
|
||||||
var left = len(milestoneIDs)
|
left := len(milestoneIDs)
|
||||||
for left > 0 {
|
for left > 0 {
|
||||||
var limit = defaultMaxInSize
|
limit := defaultMaxInSize
|
||||||
if left < limit {
|
if left < limit {
|
||||||
limit = left
|
limit = left
|
||||||
}
|
}
|
||||||
|
@ -197,7 +197,7 @@ func (comments CommentList) loadOldMilestones(e Engine) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (comments CommentList) getAssigneeIDs() []int64 {
|
func (comments CommentList) getAssigneeIDs() []int64 {
|
||||||
var ids = make(map[int64]struct{}, len(comments))
|
ids := make(map[int64]struct{}, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if _, ok := ids[comment.AssigneeID]; !ok {
|
if _, ok := ids[comment.AssigneeID]; !ok {
|
||||||
ids[comment.AssigneeID] = struct{}{}
|
ids[comment.AssigneeID] = struct{}{}
|
||||||
|
@ -211,11 +211,11 @@ func (comments CommentList) loadAssignees(e Engine) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var assigneeIDs = comments.getAssigneeIDs()
|
assigneeIDs := comments.getAssigneeIDs()
|
||||||
var assignees = make(map[int64]*User, len(assigneeIDs))
|
assignees := make(map[int64]*User, len(assigneeIDs))
|
||||||
var left = len(assigneeIDs)
|
left := len(assigneeIDs)
|
||||||
for left > 0 {
|
for left > 0 {
|
||||||
var limit = defaultMaxInSize
|
limit := defaultMaxInSize
|
||||||
if left < limit {
|
if left < limit {
|
||||||
limit = left
|
limit = left
|
||||||
}
|
}
|
||||||
|
@ -250,7 +250,7 @@ func (comments CommentList) loadAssignees(e Engine) error {
|
||||||
|
|
||||||
// getIssueIDs returns all the issue ids on this comment list which issue hasn't been loaded
|
// getIssueIDs returns all the issue ids on this comment list which issue hasn't been loaded
|
||||||
func (comments CommentList) getIssueIDs() []int64 {
|
func (comments CommentList) getIssueIDs() []int64 {
|
||||||
var ids = make(map[int64]struct{}, len(comments))
|
ids := make(map[int64]struct{}, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if comment.Issue != nil {
|
if comment.Issue != nil {
|
||||||
continue
|
continue
|
||||||
|
@ -264,7 +264,7 @@ func (comments CommentList) getIssueIDs() []int64 {
|
||||||
|
|
||||||
// Issues returns all the issues of comments
|
// Issues returns all the issues of comments
|
||||||
func (comments CommentList) Issues() IssueList {
|
func (comments CommentList) Issues() IssueList {
|
||||||
var issues = make(map[int64]*Issue, len(comments))
|
issues := make(map[int64]*Issue, len(comments))
|
||||||
for _, comment := range comments {
|
for _, comment := range comments {
|
||||||
if comment.Issue != nil {
|
if comment.Issue != nil {
|
||||||
if _, ok := issues[comment.Issue.ID]; !ok {
|
if _, ok := issues[comment.Issue.ID]; !ok {
|
||||||
|
@ -273,7 +273,7 @@ func (comments CommentList) Issues() IssueList {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var issueList = make([]*Issue, 0, len(issues))
|
issueList := make([]*Issue, 0, len(issues))
|
||||||
for _, issue := range issues {
issueList = append(issueList, issue)
}

@ -285,11 +285,11 @@ func (comments CommentList) loadIssues(e Engine) error {
return nil
}

-var issueIDs = comments.getIssueIDs()
+issueIDs := comments.getIssueIDs()
-var issues = make(map[int64]*Issue, len(issueIDs))
+issues := make(map[int64]*Issue, len(issueIDs))
-var left = len(issueIDs)
+left := len(issueIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -325,7 +325,7 @@ func (comments CommentList) loadIssues(e Engine) error {
}

func (comments CommentList) getDependentIssueIDs() []int64 {
-var ids = make(map[int64]struct{}, len(comments))
+ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if comment.DependentIssue != nil {
continue

@ -342,11 +342,11 @@ func (comments CommentList) loadDependentIssues(e Engine) error {
return nil
}

-var issueIDs = comments.getDependentIssueIDs()
+issueIDs := comments.getDependentIssueIDs()
-var issues = make(map[int64]*Issue, len(issueIDs))
+issues := make(map[int64]*Issue, len(issueIDs))
-var left = len(issueIDs)
+left := len(issueIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -391,11 +391,11 @@ func (comments CommentList) loadAttachments(e Engine) (err error) {
return nil
}

-var attachments = make(map[int64][]*Attachment, len(comments))
+attachments := make(map[int64][]*Attachment, len(comments))
-var commentsIDs = comments.getCommentIDs()
+commentsIDs := comments.getCommentIDs()
-var left = len(commentsIDs)
+left := len(commentsIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -429,7 +429,7 @@ func (comments CommentList) loadAttachments(e Engine) (err error) {
}

func (comments CommentList) getReviewIDs() []int64 {
-var ids = make(map[int64]struct{}, len(comments))
+ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.ReviewID]; !ok {
ids[comment.ReviewID] = struct{}{}

@ -443,11 +443,11 @@ func (comments CommentList) loadReviews(e Engine) error {
return nil
}

-var reviewIDs = comments.getReviewIDs()
+reviewIDs := comments.getReviewIDs()
-var reviews = make(map[int64]*Review, len(reviewIDs))
+reviews := make(map[int64]*Review, len(reviewIDs))
-var left = len(reviewIDs)
+left := len(reviewIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

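All of the hunks above touch the same chunked-lookup idiom: a list of IDs is consumed defaultMaxInSize entries at a time before being handed to an SQL IN clause, and the commit only rewrites the "var x = ..." declarations inside that loop to the short form "x := ...". For readers who want the shape of the loop in isolation, here is a minimal, self-contained sketch; the helper name chunk and the demo values are illustrative assumptions, not code from this commit.

package main

import "fmt"

// chunk splits ids into batches of at most size elements, mirroring the
// "left > 0 / limit" loop these hunks reformat. The helper itself is an
// illustration only; the real code slices the IDs inline before each query.
func chunk(ids []int64, size int) [][]int64 {
	batches := make([][]int64, 0, (len(ids)+size-1)/size)
	for len(ids) > 0 {
		limit := size
		if len(ids) < limit {
			limit = len(ids)
		}
		batches = append(batches, ids[:limit])
		ids = ids[limit:]
	}
	return batches
}

func main() {
	fmt.Println(chunk([]int64{1, 2, 3, 4, 5}, 2)) // [[1 2] [3 4] [5]]
}
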
@ -71,7 +71,7 @@ func CreateIssueDependency(user *User, issue, dep *Issue) error {
}

// RemoveIssueDependency removes a dependency from an issue
-func RemoveIssueDependency(user *User, issue *Issue, dep *Issue, depType DependencyType) (err error) {
+func RemoveIssueDependency(user *User, issue, dep *Issue, depType DependencyType) (err error) {
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {

@ -107,7 +107,7 @@ func RemoveIssueDependency(user *User, issue *Issue, dep *Issue, depType Depende
}

// Check if the dependency already exists
-func issueDepExists(e Engine, issueID int64, depID int64) (bool, error) {
+func issueDepExists(e Engine, issueID, depID int64) (bool, error) {
return e.Where("(issue_id = ? AND dependency_id = ?)", issueID, depID).Exist(&IssueDependency{})
}

@ -256,7 +256,6 @@ func UpdateLabel(l *Label) error {

// DeleteLabel delete a label
func DeleteLabel(id, labelID int64) error {

label, err := GetLabelByID(labelID)
if err != nil {
if IsErrLabelNotExist(err) {

@ -646,7 +645,7 @@ func newIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (err
return
}

-var opts = &CreateCommentOptions{
+opts := &CreateCommentOptions{
Type: CommentTypeLabel,
Doer: doer,
Repo: issue.Repo,

@ -748,7 +747,7 @@ func deleteIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (
return
}

-var opts = &CreateCommentOptions{
+opts := &CreateCommentOptions{
Type: CommentTypeLabel,
Doer: doer,
Repo: issue.Repo,

@ -231,7 +231,6 @@ func TestGetLabelsByOrgID(t *testing.T) {

_, err = GetLabelsByOrgID(-1, "leastissues", ListOptions{})
assert.True(t, IsErrOrgLabelNotExist(err))

}

//

@ -35,9 +35,9 @@ func (issues IssueList) loadRepositories(e Engine) ([]*Repository, error) {

repoIDs := issues.getRepoIDs()
repoMaps := make(map[int64]*Repository, len(repoIDs))
-var left = len(repoIDs)
+left := len(repoIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -79,9 +79,9 @@ func (issues IssueList) loadPosters(e Engine) error {

posterIDs := issues.getPosterIDs()
posterMaps := make(map[int64]*User, len(posterIDs))
-var left = len(posterIDs)
+left := len(posterIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -108,7 +108,7 @@ func (issues IssueList) loadPosters(e Engine) error {
}

func (issues IssueList) getIssueIDs() []int64 {
-var ids = make([]int64, 0, len(issues))
+ids := make([]int64, 0, len(issues))
for _, issue := range issues {
ids = append(ids, issue.ID)
}

@ -125,11 +125,11 @@ func (issues IssueList) loadLabels(e Engine) error {
IssueLabel *IssueLabel `xorm:"extends"`
}

-var issueLabels = make(map[int64][]*Label, len(issues)*3)
+issueLabels := make(map[int64][]*Label, len(issues)*3)
-var issueIDs = issues.getIssueIDs()
+issueIDs := issues.getIssueIDs()
-var left = len(issueIDs)
+left := len(issueIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -169,7 +169,7 @@ func (issues IssueList) loadLabels(e Engine) error {
}

func (issues IssueList) getMilestoneIDs() []int64 {
-var ids = make(map[int64]struct{}, len(issues))
+ids := make(map[int64]struct{}, len(issues))
for _, issue := range issues {
if _, ok := ids[issue.MilestoneID]; !ok {
ids[issue.MilestoneID] = struct{}{}

@ -185,9 +185,9 @@ func (issues IssueList) loadMilestones(e Engine) error {
}

milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
-var left = len(milestoneIDs)
+left := len(milestoneIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -217,11 +217,11 @@ func (issues IssueList) loadAssignees(e Engine) error {
Assignee *User `xorm:"extends"`
}

-var assignees = make(map[int64][]*User, len(issues))
+assignees := make(map[int64][]*User, len(issues))
-var issueIDs = issues.getIssueIDs()
+issueIDs := issues.getIssueIDs()
-var left = len(issueIDs)
+left := len(issueIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -259,7 +259,7 @@ func (issues IssueList) loadAssignees(e Engine) error {
}

func (issues IssueList) getPullIssueIDs() []int64 {
-var ids = make([]int64, 0, len(issues))
+ids := make([]int64, 0, len(issues))
for _, issue := range issues {
if issue.IsPull && issue.PullRequest == nil {
ids = append(ids, issue.ID)

@ -275,9 +275,9 @@ func (issues IssueList) loadPullRequests(e Engine) error {
}

pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
-var left = len(issuesIDs)
+left := len(issuesIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -317,11 +317,11 @@ func (issues IssueList) loadAttachments(e Engine) (err error) {
return nil
}

-var attachments = make(map[int64][]*Attachment, len(issues))
+attachments := make(map[int64][]*Attachment, len(issues))
-var issuesIDs = issues.getIssueIDs()
+issuesIDs := issues.getIssueIDs()
-var left = len(issuesIDs)
+left := len(issuesIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -362,11 +362,11 @@ func (issues IssueList) loadComments(e Engine, cond builder.Cond) (err error) {
return nil
}

-var comments = make(map[int64][]*Comment, len(issues))
+comments := make(map[int64][]*Comment, len(issues))
-var issuesIDs = issues.getIssueIDs()
+issuesIDs := issues.getIssueIDs()
-var left = len(issuesIDs)
+left := len(issuesIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -411,18 +411,18 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) {
if len(issues) == 0 {
return nil
}
-var trackedTimes = make(map[int64]int64, len(issues))
+trackedTimes := make(map[int64]int64, len(issues))

-var ids = make([]int64, 0, len(issues))
+ids := make([]int64, 0, len(issues))
for _, issue := range issues {
if issue.Repo.IsTimetrackerEnabled() {
ids = append(ids, issue.ID)
}
}

-var left = len(ids)
+left := len(ids)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -45,7 +45,7 @@ func updateIssueLock(opts *IssueLockOptions, lock bool) error {
return err
}

-var opt = &CreateCommentOptions{
+opt := &CreateCommentOptions{
Doer: opts.Doer,
Issue: opts.Issue,
Repo: opts.Issue.Repo,

@ -282,7 +282,7 @@ func changeMilestoneAssign(e *xorm.Session, doer *User, issue *Issue, oldMilesto
return err
}

-var opts = &CreateCommentOptions{
+opts := &CreateCommentOptions{
Type: CommentTypeMilestone,
Doer: doer,
Repo: issue.Repo,

@ -366,7 +366,7 @@ func DeleteMilestoneByRepoID(repoID, id int64) error {
type MilestoneList []*Milestone

func (milestones MilestoneList) getMilestoneIDs() []int64 {
-var ids = make([]int64, 0, len(milestones))
+ids := make([]int64, 0, len(milestones))
for _, ms := range milestones {
ids = append(ids, ms.ID)
}

@ -596,7 +596,7 @@ func (milestones MilestoneList) loadTotalTrackedTimes(e Engine) error {
if len(milestones) == 0 {
return nil
}
-var trackedTimes = make(map[int64]int64, len(milestones))
+trackedTimes := make(map[int64]int64, len(milestones))

// Get total tracked time by milestone_id
rows, err := e.Table("issue").

@ -38,14 +38,14 @@ type FindReactionsOptions struct {
}

func (opts *FindReactionsOptions) toConds() builder.Cond {
-//If Issue ID is set add to Query
+// If Issue ID is set add to Query
-var cond = builder.NewCond()
+cond := builder.NewCond()
if opts.IssueID > 0 {
cond = cond.And(builder.Eq{"reaction.issue_id": opts.IssueID})
}
-//If CommentID is > 0 add to Query
+// If CommentID is > 0 add to Query
-//If it is 0 Query ignore CommentID to select
+// If it is 0 Query ignore CommentID to select
-//If it is -1 it explicit search of Issue Reactions where CommentID = 0
+// If it is -1 it explicit search of Issue Reactions where CommentID = 0
if opts.CommentID > 0 {
cond = cond.And(builder.Eq{"reaction.comment_id": opts.CommentID})
} else if opts.CommentID == -1 {

@ -68,7 +68,8 @@ func (opts *FindReactionsOptions) toConds() builder.Cond {
func FindCommentReactions(comment *Comment) (ReactionList, error) {
return findReactions(x, FindReactionsOptions{
IssueID: comment.IssueID,
-CommentID: comment.ID})
+CommentID: comment.ID,
+})
}

// FindIssueReactions returns a ReactionList of all reactions from an issue

@ -260,7 +261,7 @@ func (list ReactionList) HasUser(userID int64) bool {

// GroupByType returns reactions grouped by type
func (list ReactionList) GroupByType() map[string]ReactionList {
-var reactions = make(map[string]ReactionList)
+reactions := make(map[string]ReactionList)
for _, reaction := range list {
reactions[reaction.Type] = append(reactions[reaction.Type], reaction)
}

@ -314,7 +315,7 @@ func (list ReactionList) LoadUsers(repo *Repository) ([]*User, error) {
// GetFirstUsers returns first reacted user display names separated by comma
func (list ReactionList) GetFirstUsers() string {
var buffer bytes.Buffer
-var rem = setting.UI.ReactionMaxUserNum
+rem := setting.UI.ReactionMaxUserNum
for _, reaction := range list {
if buffer.Len() > 0 {
buffer.WriteString(", ")

@ -54,7 +54,7 @@ func GetUserStopwatches(userID int64, listOptions ListOptions) ([]*Stopwatch, er
}

// StopwatchExists returns true if the stopwatch exists
-func StopwatchExists(userID int64, issueID int64) bool {
+func StopwatchExists(userID, issueID int64) bool {
_, exists, _ := getStopwatch(x, userID, issueID)
return exists
}

@ -108,7 +108,7 @@ func CreateOrStopIssueStopwatch(user *User, issue *Issue) error {
return err
}
} else {
-//if another stopwatch is running: stop it
+// if another stopwatch is running: stop it
exists, sw, err := HasUserStopwatch(user.ID)
if err != nil {
return err

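The signature rewrites in this and the surrounding files implement the rule named in the commit message: adjacent parameters of the same type are grouped under a single type token. A hedged before/after sketch follows; the function bodies and names are placeholders invented for the example, not taken from the repository.

package main

// stopwatchExistsOld repeats the parameter type on each parameter, the style the commit removes.
func stopwatchExistsOld(userID int64, issueID int64) bool {
	return userID > 0 && issueID > 0 // placeholder body for illustration
}

// stopwatchExistsNew groups the two int64 parameters, the style the commit adopts.
func stopwatchExistsNew(userID, issueID int64) bool {
	return userID > 0 && issueID > 0 // placeholder body for illustration
}

func main() {
	_ = stopwatchExistsOld(1, 2)
	_ = stopwatchExistsNew(1, 2)
}
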
@ -47,7 +47,7 @@ func TestIssueAPIURL(t *testing.T) {

func TestGetIssuesByIDs(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
-testSuccess := func(expectedIssueIDs []int64, nonExistentIssueIDs []int64) {
+testSuccess := func(expectedIssueIDs, nonExistentIssueIDs []int64) {
issues, err := GetIssuesByIDs(append(expectedIssueIDs, nonExistentIssueIDs...))
assert.NoError(t, err)
actualIssueIDs := make([]int64, len(issues))

@ -55,7 +55,6 @@ func TestGetIssuesByIDs(t *testing.T) {
actualIssueIDs[i] = issue.ID
}
assert.Equal(t, expectedIssueIDs, actualIssueIDs)

}
testSuccess([]int64{1, 2, 3}, []int64{})
testSuccess([]int64{1, 2, 3}, []int64{NonexistentID})

@ -87,7 +86,7 @@ func TestGetParticipantIDsByIssue(t *testing.T) {
}

func TestIssue_ClearLabels(t *testing.T) {
-var tests = []struct {
+tests := []struct {
issueID int64
doerID int64
}{

@ -342,7 +341,7 @@ func testInsertIssue(t *testing.T, title, content string) {
repo := AssertExistsAndLoadBean(t, &Repository{ID: 1}).(*Repository)
user := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)

-var issue = Issue{
+issue := Issue{
RepoID: repo.ID,
PosterID: user.ID,
Title: title,

@ -193,14 +193,14 @@ func TotalTimes(options FindTrackedTimesOptions) (map[*User]string, error) {
if err != nil {
return nil, err
}
-//Adding total time per user ID
+// Adding total time per user ID
totalTimesByUser := make(map[int64]int64)
for _, t := range trackedTimes {
totalTimesByUser[t.UserID] += t.Time
}

totalTimes := make(map[*User]string)
-//Fetching User and making time human readable
+// Fetching User and making time human readable
for userID, total := range totalTimesByUser {
user, err := GetUserByID(userID)
if err != nil {

@ -283,7 +283,6 @@ func DeleteTime(t *TrackedTime) error {
}

func deleteTimes(e Engine, opts FindTrackedTimesOptions) (removedTime int64, err error) {

removedTime, err = getTrackedSeconds(e, opts)
if err != nil || removedTime == 0 {
return

@ -20,7 +20,7 @@ func TestAddTime(t *testing.T) {
issue1, err := GetIssueByID(1)
assert.NoError(t, err)

-//3661 = 1h 1min 1s
+// 3661 = 1h 1min 1s
trackedTime, err := AddTime(user3, issue1, 3661, time.Now())
assert.NoError(t, err)
assert.Equal(t, int64(3), trackedTime.UserID)

@ -119,7 +119,7 @@ func getIssueWatchers(e Engine, issueID int64, listOptions ListOptions) (IssueWa
return watches, sess.Find(&watches)
}

-func removeIssueWatchersByRepoID(e Engine, userID int64, repoID int64) error {
+func removeIssueWatchersByRepoID(e Engine, userID, repoID int64) error {
_, err := e.
Join("INNER", "issue", "`issue`.id = `issue_watch`.issue_id AND `issue`.repo_id = ?", repoID).
Where("`issue_watch`.user_id = ?", userID).

@ -27,7 +27,7 @@ type crossReferencesContext struct {
RemoveOld bool
}

-func findOldCrossReferences(e Engine, issueID int64, commentID int64) ([]*Comment, error) {
+func findOldCrossReferences(e Engine, issueID, commentID int64) ([]*Comment, error) {
active := make([]*Comment, 0, 10)
return active, e.Where("`ref_action` IN (?, ?, ?)", references.XRefActionNone, references.XRefActionCloses, references.XRefActionReopens).
And("`ref_issue_id` = ?", issueID).

@ -35,7 +35,7 @@ func findOldCrossReferences(e Engine, issueID int64, commentID int64) ([]*Commen
Find(&active)
}

-func neuterCrossReferences(e Engine, issueID int64, commentID int64) error {
+func neuterCrossReferences(e Engine, issueID, commentID int64) error {
active, err := findOldCrossReferences(e, issueID, commentID)
if err != nil {
return err

@ -115,7 +115,7 @@ func (issue *Issue) createCrossReferences(e *xorm.Session, ctx *crossReferencesC
if ctx.OrigComment != nil {
refCommentID = ctx.OrigComment.ID
}
-var opts = &CreateCommentOptions{
+opts := &CreateCommentOptions{
Type: ctx.Type,
Doer: ctx.Doer,
Repo: xref.Issue.Repo,

@ -194,7 +194,6 @@ func (issue *Issue) updateCrossReferenceList(list []*crossReference, xref *cross
// verifyReferencedIssue will check if the referenced issue exists, and whether the doer has permission to do what
func (issue *Issue) verifyReferencedIssue(e Engine, ctx *crossReferencesContext, repo *Repository,
ref references.IssueReference) (*Issue, references.XRefAction, error) {

refIssue := &Issue{RepoID: repo.ID, Index: ref.Index}
refAction := ref.Action

@ -49,11 +49,9 @@ type LFSTokenResponse struct {
Href string `json:"href"`
}

-var (
-// ErrLFSObjectNotExist is returned from lfs models functions in order
-// to differentiate between database and missing object errors.
-ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")
-)
+// ErrLFSObjectNotExist is returned from lfs models functions in order
+// to differentiate between database and missing object errors.
+var ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")

const (
// LFSMetaFileIdentifier is the string appearing at the first line of LFS pointer files.

@ -218,7 +216,7 @@ func IterateLFS(f func(mo *LFSMetaObject) error) error {
var start int
const batchSize = 100
for {
-var mos = make([]*LFSMetaObject, 0, batchSize)
+mos := make([]*LFSMetaObject, 0, batchSize)
if err := x.Limit(batchSize, start).Find(&mos); err != nil {
return err
}

@ -138,7 +138,7 @@ func DeleteLFSLockByID(id int64, u *User, force bool) (*LFSLock, error) {
return lock, err
}

-//CheckLFSAccessForRepo check needed access mode base on action
+// CheckLFSAccessForRepo check needed access mode base on action
func CheckLFSAccessForRepo(u *User, repo *Repository, mode AccessMode) error {
if u == nil {
return ErrLFSUnauthorizedAction{repo.ID, "undefined", mode}

@ -477,7 +477,7 @@ func LoginViaLDAP(user *User, login, password string, source *LoginSource) (*Use
return nil, ErrUserNotExist{0, login, 0}
}

-var isAttributeSSHPublicKeySet = len(strings.TrimSpace(source.LDAP().AttributeSSHPublicKey)) > 0
+isAttributeSSHPublicKeySet := len(strings.TrimSpace(source.LDAP().AttributeSSHPublicKey)) > 0

// Update User admin flag if exist
if isExist, err := IsUserExist(0, sr.Username); err != nil {

@ -55,8 +55,8 @@ func insertIssue(sess *xorm.Session, issue *Issue) error {
if _, err := sess.NoAutoTime().Insert(issue); err != nil {
return err
}
-var issueLabels = make([]IssueLabel, 0, len(issue.Labels))
+issueLabels := make([]IssueLabel, 0, len(issue.Labels))
-var labelIDs = make([]int64, 0, len(issue.Labels))
+labelIDs := make([]int64, 0, len(issue.Labels))
for _, label := range issue.Labels {
issueLabels = append(issueLabels, IssueLabel{
IssueID: issue.ID,

@ -137,7 +137,7 @@ func InsertIssueComments(comments []*Comment) error {
return nil
}

-var issueIDs = make(map[int64]bool)
+issueIDs := make(map[int64]bool)
for _, comment := range comments {
issueIDs[comment.IssueID] = true
}

@ -26,7 +26,7 @@ func updateMigrationServiceTypes(x *xorm.Engine) error {
var last int
const batchSize = 50
for {
-var results = make([]Repository, 0, batchSize)
+results := make([]Repository, 0, batchSize)
err := x.Where("original_url <> '' AND original_url IS NOT NULL").
And("original_service_type = 0 OR original_service_type IS NULL").
OrderBy("id").

@ -48,7 +48,7 @@ func updateMigrationServiceTypes(x *xorm.Engine) error {
if err != nil {
return err
}
-var serviceType = PlainGitService
+serviceType := PlainGitService
if strings.EqualFold(u.Host, "github.com") {
serviceType = GithubService
}

@ -9,7 +9,6 @@ import (
)

func removeLabelUneededCols(x *xorm.Engine) error {

// Make sure the columns exist before dropping them
type Label struct {
QueryString string

@ -9,7 +9,6 @@ import (
)

func addTeamIncludesAllRepositories(x *xorm.Engine) error {

type Team struct {
ID int64 `xorm:"pk autoincr"`
IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"`

@ -9,7 +9,6 @@ import (
)

func addTemplateToRepo(x *xorm.Engine) error {

type Repository struct {
IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
TemplateID int64 `xorm:"INDEX"`

@ -380,7 +380,7 @@ func addBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
}
totalPages := totalIssues / pageSize

-var executeBody = func(page, pageSize int64) error {
+executeBody := func(page, pageSize int64) error {
// Find latest review of each user in each pull request, and set official field if appropriate
reviews := []*Review{}

@ -11,7 +11,6 @@ import (
)

func sanitizeOriginalURL(x *xorm.Engine) error {

type Repository struct {
ID int64
OriginalURL string `xorm:"VARCHAR(2048)"`

@ -20,7 +19,7 @@ func sanitizeOriginalURL(x *xorm.Engine) error {
var last int
const batchSize = 50
for {
-var results = make([]Repository, 0, batchSize)
+results := make([]Repository, 0, batchSize)
err := x.Where("original_url <> '' AND original_url IS NOT NULL").
And("original_service_type = 0 OR original_service_type IS NULL").
OrderBy("id").

@ -151,7 +151,7 @@ func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error)
return newAvatar, nil
}

-if err := ioutil.WriteFile(filepath.Join(setting.Avatar.Path, newAvatar), data, 0666); err != nil {
+if err := ioutil.WriteFile(filepath.Join(setting.Avatar.Path, newAvatar), data, 0o666); err != nil {
return "", fmt.Errorf("ioutil.WriteFile: %v", err)
}

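The avatar hunk just above only changes the spelling of the file mode from the legacy octal literal 0666 to the explicit 0o666 form introduced in Go 1.13; the value is identical. A hedged sketch follows; the path and payload are placeholders, and os.WriteFile stands in for the ioutil.WriteFile call used in the migration.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Both literals denote the same permission bits; 0o666 only makes the octal base explicit.
	fmt.Println(0666 == 0o666) // true

	// Equivalent write using the newer literal style; the path is a placeholder.
	if err := os.WriteFile("/tmp/example-avatar.png", []byte("placeholder"), 0o666); err != nil {
		fmt.Println("write failed:", err)
	}
}
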
@ -9,7 +9,6 @@ import (
)

func extendTrackedTimes(x *xorm.Engine) error {

type TrackedTime struct {
Time int64 `xorm:"NOT NULL"`
Deleted bool `xorm:"NOT NULL DEFAULT false"`

@ -9,7 +9,6 @@ import (
)

func addRequireSignedCommits(x *xorm.Engine) error {

type ProtectedBranch struct {
RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"`
}

@ -9,7 +9,6 @@ import (
)

func addUserRepoMissingColumns(x *xorm.Engine) error {

type VisibleType int
type User struct {
PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'pbkdf2'"`

@ -40,7 +40,7 @@ func fixMergeBase(x *xorm.Engine) error {
MergedCommitID string `xorm:"VARCHAR(40)"`
}

-var limit = setting.Database.IterateBufferSize
+limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}

@ -9,7 +9,6 @@ import (
)

func purgeUnusedDependencies(x *xorm.Engine) error {

if _, err := x.Exec("DELETE FROM issue_dependency WHERE issue_id NOT IN (SELECT id FROM issue)"); err != nil {
return err
}

@ -12,7 +12,6 @@ import (
)

func expandWebhooks(x *xorm.Engine) error {

type HookEvents struct {
Create bool `json:"create"`
Delete bool `json:"delete"`

@ -57,7 +56,7 @@ func expandWebhooks(x *xorm.Engine) error {
if err := sess.Begin(); err != nil {
return err
}
-var results = make([]Webhook, 0, batchSize)
+results := make([]Webhook, 0, batchSize)
err := x.OrderBy("id").
Limit(batchSize, last).
Find(&results)

@ -40,7 +40,7 @@ func refixMergeBase(x *xorm.Engine) error {
MergedCommitID string `xorm:"VARCHAR(40)"`
}

-var limit = setting.Database.IterateBufferSize
+limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}

@ -65,7 +65,7 @@ func addCommitDivergenceToPulls(x *xorm.Engine) error {
if err := sess.Begin(); err != nil {
return err
}
-var results = make([]*PullRequest, 0, batchSize)
+results := make([]*PullRequest, 0, batchSize)
err := sess.Where("has_merged = ?", false).OrderBy("id").Limit(batchSize, last).Find(&results)
if err != nil {
return err

@ -11,7 +11,7 @@ import (
)

func updateMatrixWebhookHTTPMethod(x *xorm.Engine) error {
-var matrixHookTaskType = 9 // value comes from the models package
+matrixHookTaskType := 9 // value comes from the models package
type Webhook struct {
HTTPMethod string
}

@ -11,7 +11,6 @@ import (
)

func addProjectsInfo(x *xorm.Engine) error {

// Create new tables
type (
ProjectType uint8

@ -83,7 +83,7 @@ func createReviewsForCodeComments(x *xorm.Engine) error {
return err
}

-var updateComment = func(comments []*Comment) error {
+updateComment := func(comments []*Comment) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {

@ -131,10 +131,10 @@ func createReviewsForCodeComments(x *xorm.Engine) error {
return sess.Commit()
}

-var start = 0
+start := 0
-var batchSize = 100
+batchSize := 100
for {
-var comments = make([]*Comment, 0, batchSize)
+comments := make([]*Comment, 0, batchSize)
if err := x.Where("review_id = 0 and type = 21").Limit(batchSize, start).Find(&comments); err != nil {
return err
}

@ -26,7 +26,6 @@ func userPath(userName string) string {
}

func fixPublisherIDforTagReleases(x *xorm.Engine) error {

type Release struct {
ID int64
RepoID int64

@ -9,7 +9,6 @@ import (
)

func fixRepoTopics(x *xorm.Engine) error {

type Topic struct {
ID int64 `xorm:"pk autoincr"`
Name string `xorm:"UNIQUE VARCHAR(25)"`

@ -51,10 +51,12 @@ func updateCodeCommentReplies(x *xorm.Engine) error {
AND comment.id != first.id
AND comment.commit_sha != first.commit_sha`

-var sqlCmd string
-var start = 0
-var batchSize = 100
-sess := x.NewSession()
+var (
+sqlCmd string
+start = 0
+batchSize = 100
+sess = x.NewSession()
+)
defer sess.Close()
for {
if err := sess.Begin(); err != nil {

@ -68,7 +70,7 @@ func updateCodeCommentReplies(x *xorm.Engine) error {
}
}

-var comments = make([]*Comment, 0, batchSize)
+comments := make([]*Comment, 0, batchSize)

switch {
case setting.Database.UseMySQL:

@ -21,7 +21,7 @@ func convertTaskTypeToString(x *xorm.Engine) error {
MATRIX
)

-var hookTaskTypes = map[int]string{
+hookTaskTypes := map[int]string{
GITEA: "gitea",
GOGS: "gogs",
SLACK: "slack",

@ -21,7 +21,7 @@ func convertWebhookTaskTypeToString(x *xorm.Engine) error {
MATRIX
)

-var hookTaskTypes = map[int]string{
+hookTaskTypes := map[int]string{
GITEA: "gitea",
GOGS: "gogs",
SLACK: "slack",

@ -14,7 +14,6 @@ import (
)

func addIssueDependencies(x *xorm.Engine) (err error) {

type IssueDependency struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"NOT NULL"`

@ -90,7 +89,7 @@ func addIssueDependencies(x *xorm.Engine) (err error) {
Created time.Time `xorm:"-"`
}

-//Updating existing issue units
+// Updating existing issue units
units := make([]*RepoUnit, 0, 100)
err = x.Where("`type` = ?", v16UnitTypeIssues).Find(&units)
if err != nil {

@ -79,7 +79,6 @@ func addScratchHash(x *xorm.Engine) error {
return err
}
return sess.Commit()

}

func hashToken(token, salt string) string {

@ -40,7 +40,7 @@ func addPullRequestRebaseWithMerge(x *xorm.Engine) error {
return err
}

-//Updating existing issue units
+// Updating existing issue units
units := make([]*RepoUnit, 0, 100)
if err := sess.Where("`type` = ?", v16UnitTypePRs).Find(&units); err != nil {
return fmt.Errorf("Query repo units: %v", err)

@ -11,7 +11,6 @@ import (
)

func addCanCloseIssuesViaCommitInAnyBranch(x *xorm.Engine) error {

type Repository struct {
ID int64 `xorm:"pk autoincr"`
CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"`

@ -29,9 +29,9 @@ func addCommitStatusContext(x *xorm.Engine) error {
sess := x.NewSession()
defer sess.Close()

-var start = 0
+start := 0
for {
-var statuses = make([]*CommitStatus, 0, 100)
+statuses := make([]*CommitStatus, 0, 100)
err := sess.OrderBy("id").Limit(100, start).Find(&statuses)
if err != nil {
return err

@ -10,7 +10,6 @@ import (
)

func removeLingeringIndexStatus(x *xorm.Engine) error {

_, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_indexer_status`"))
return err
}

@ -25,7 +25,7 @@ func deleteOrphanedAttachments(x *xorm.Engine) error {
sess := x.NewSession()
defer sess.Close()

-var limit = setting.Database.IterateBufferSize
+limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}

@ -42,7 +42,7 @@ func deleteOrphanedAttachments(x *xorm.Engine) error {
return nil
}

-var ids = make([]int64, 0, limit)
+ids := make([]int64, 0, limit)
for _, attachment := range attachements {
ids = append(ids, attachment.ID)
}

@ -305,7 +305,7 @@ func Ping() error {
}

// DumpDatabase dumps all data from database according the special database SQL syntax to file system.
-func DumpDatabase(filePath string, dbType string) error {
+func DumpDatabase(filePath, dbType string) error {
var tbs []*schemas.Table
for _, t := range tables {
t, err := x.TableInfo(t)

@ -182,7 +182,6 @@ func createOrUpdateIssueNotifications(e Engine, issueID, commentID, notification
// init
var toNotify map[int64]struct{}
notifications, err := getNotificationsByIssueID(e, issueID)

if err != nil {
return err
}

@ -481,7 +480,7 @@ func (nl NotificationList) LoadAttributes() (err error) {
}

func (nl NotificationList) getPendingRepoIDs() []int64 {
-var ids = make(map[int64]struct{}, len(nl))
+ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.Repository != nil {
continue

@ -499,11 +498,11 @@ func (nl NotificationList) LoadRepos() (RepositoryList, []int, error) {
return RepositoryList{}, []int{}, nil
}

-var repoIDs = nl.getPendingRepoIDs()
+repoIDs := nl.getPendingRepoIDs()
-var repos = make(map[int64]*Repository, len(repoIDs))
+repos := make(map[int64]*Repository, len(repoIDs))
-var left = len(repoIDs)
+left := len(repoIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -532,7 +531,7 @@ func (nl NotificationList) LoadRepos() (RepositoryList, []int, error) {

failed := []int{}

-var reposList = make(RepositoryList, 0, len(repoIDs))
+reposList := make(RepositoryList, 0, len(repoIDs))
for i, notification := range nl {
if notification.Repository == nil {
notification.Repository = repos[notification.RepoID]

@ -557,7 +556,7 @@ func (nl NotificationList) LoadRepos() (RepositoryList, []int, error) {
}

func (nl NotificationList) getPendingIssueIDs() []int64 {
-var ids = make(map[int64]struct{}, len(nl))
+ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.Issue != nil {
continue

@ -575,11 +574,11 @@ func (nl NotificationList) LoadIssues() ([]int, error) {
return []int{}, nil
}

-var issueIDs = nl.getPendingIssueIDs()
+issueIDs := nl.getPendingIssueIDs()
-var issues = make(map[int64]*Issue, len(issueIDs))
+issues := make(map[int64]*Issue, len(issueIDs))
-var left = len(issueIDs)
+left := len(issueIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -643,7 +642,7 @@ func (nl NotificationList) Without(failures []int) NotificationList {
}

func (nl NotificationList) getPendingCommentIDs() []int64 {
-var ids = make(map[int64]struct{}, len(nl))
+ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.CommentID == 0 || notification.Comment != nil {
continue

@ -661,11 +660,11 @@ func (nl NotificationList) LoadComments() ([]int, error) {
return []int{}, nil
}

-var commentIDs = nl.getPendingCommentIDs()
+commentIDs := nl.getPendingCommentIDs()
-var comments = make(map[int64]*Comment, len(commentIDs))
+comments := make(map[int64]*Comment, len(commentIDs))
-var left = len(commentIDs)
+left := len(commentIDs)
for left > 0 {
-var limit = defaultMaxInSize
+limit := defaultMaxInSize
if left < limit {
limit = left
}

@ -789,7 +788,6 @@ func getNotificationByID(e Engine, notificationID int64) (*Notification, error)
ok, err := e.
Where("id = ?", notificationID).
Get(notification)

if err != nil {
return nil, err
}

@ -802,7 +800,7 @@ func getNotificationByID(e Engine, notificationID int64) (*Notification, error)
}

// UpdateNotificationStatuses updates the statuses of all of a user's notifications that are of the currentStatus type to the desiredStatus
-func UpdateNotificationStatuses(user *User, currentStatus NotificationStatus, desiredStatus NotificationStatus) error {
+func UpdateNotificationStatuses(user *User, currentStatus, desiredStatus NotificationStatus) error {
n := &Notification{Status: desiredStatus, UpdatedBy: user.ID}
_, err := x.
Where("user_id = ? AND status = ?", user.ID, currentStatus).

@ -26,7 +26,8 @@ var OAuth2Providers = map[string]OAuth2Provider{
"bitbucket": {Name: "bitbucket", DisplayName: "Bitbucket", Image: "/img/auth/bitbucket.png"},
"dropbox": {Name: "dropbox", DisplayName: "Dropbox", Image: "/img/auth/dropbox.png"},
"facebook": {Name: "facebook", DisplayName: "Facebook", Image: "/img/auth/facebook.png"},
-"github": {Name: "github", DisplayName: "GitHub", Image: "/img/auth/github.png",
+"github": {
+Name: "github", DisplayName: "GitHub", Image: "/img/auth/github.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("github"),
AuthURL: oauth2.GetDefaultAuthURL("github"),

@ -34,7 +35,8 @@ var OAuth2Providers = map[string]OAuth2Provider{
EmailURL: oauth2.GetDefaultEmailURL("github"),
},
},
-"gitlab": {Name: "gitlab", DisplayName: "GitLab", Image: "/img/auth/gitlab.png",
+"gitlab": {
+Name: "gitlab", DisplayName: "GitLab", Image: "/img/auth/gitlab.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("gitlab"),
AuthURL: oauth2.GetDefaultAuthURL("gitlab"),

@ -45,14 +47,16 @@ var OAuth2Providers = map[string]OAuth2Provider{
"openidConnect": {Name: "openidConnect", DisplayName: "OpenID Connect", Image: "/img/auth/openid_connect.svg"},
"twitter": {Name: "twitter", DisplayName: "Twitter", Image: "/img/auth/twitter.png"},
"discord": {Name: "discord", DisplayName: "Discord", Image: "/img/auth/discord.png"},
-"gitea": {Name: "gitea", DisplayName: "Gitea", Image: "/img/auth/gitea.png",
+"gitea": {
+Name: "gitea", DisplayName: "Gitea", Image: "/img/auth/gitea.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("gitea"),
AuthURL: oauth2.GetDefaultAuthURL("gitea"),
ProfileURL: oauth2.GetDefaultProfileURL("gitea"),
},
},
-"nextcloud": {Name: "nextcloud", DisplayName: "Nextcloud", Image: "/img/auth/nextcloud.png",
+"nextcloud": {
+Name: "nextcloud", DisplayName: "Nextcloud", Image: "/img/auth/nextcloud.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("nextcloud"),
AuthURL: oauth2.GetDefaultAuthURL("nextcloud"),

@ -60,7 +64,8 @@ var OAuth2Providers = map[string]OAuth2Provider{
},
},
"yandex": {Name: "yandex", DisplayName: "Yandex", Image: "/img/auth/yandex.png"},
-"mastodon": {Name: "mastodon", DisplayName: "Mastodon", Image: "/img/auth/mastodon.png",
+"mastodon": {
+Name: "mastodon", DisplayName: "Mastodon", Image: "/img/auth/mastodon.png",
CustomURLMapping: &oauth2.CustomURLMapping{
AuthURL: oauth2.GetDefaultAuthURL("mastodon"),
},

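The provider-map hunks above change only the layout of the multi-line map entries: the opening brace now stands alone and the first fields move to the next line, the stricter formatter layout this commit applies across the models package. A reduced sketch with a stand-in struct follows; Provider here is not the real OAuth2Provider type, just an illustration of the two layouts.

package main

import "fmt"

// Provider is a stand-in for the OAuth2Provider struct the diff touches.
type Provider struct {
	Name        string
	DisplayName string
	Image       string
}

var providers = map[string]Provider{
	// Single-line entries keep the compact form.
	"discord": {Name: "discord", DisplayName: "Discord", Image: "/img/auth/discord.png"},
	// Entries that span several lines open the literal first, then list the fields.
	"github": {
		Name: "github", DisplayName: "GitHub", Image: "/img/auth/github.png",
	},
}

func main() {
	fmt.Println(len(providers)) // 2
}
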
@@ -102,8 +102,8 @@ func FindOrgMembers(opts *FindOrgMembersOpts) (UserList, map[int64]bool, error)
         return nil, nil, err
     }

-    var ids = make([]int64, len(ous))
-    var idsIsPublic = make(map[int64]bool, len(ous))
+    ids := make([]int64, len(ous))
+    idsIsPublic := make(map[int64]bool, len(ous))
     for i, ou := range ous {
         ids[i] = ou.UID
         idsIsPublic[ou.UID] = ou.IsPublic
@@ -205,7 +205,7 @@ func CreateOrganization(org, owner *User) (err error) {
     }

     // insert units for team
-    var units = make([]TeamUnit, 0, len(AllRepoUnitTypes))
+    units := make([]TeamUnit, 0, len(AllRepoUnitTypes))
     for _, tp := range AllRepoUnitTypes {
         units = append(units, TeamUnit{
             OrgID: org.ID,
@@ -437,11 +437,11 @@ func getOwnedOrgsByUserID(sess *xorm.Session, userID int64) ([]*User, error) {
 }

 // HasOrgVisible tells if the given user can see the given org
-func HasOrgVisible(org *User, user *User) bool {
+func HasOrgVisible(org, user *User) bool {
     return hasOrgVisible(x, org, user)
 }

-func hasOrgVisible(e Engine, org *User, user *User) bool {
+func hasOrgVisible(e Engine, org, user *User) bool {
     // Not SignedUser
     if user == nil {
         return org.Visibility == structs.VisibleTypePublic
@@ -813,7 +813,7 @@ func (org *User) AccessibleTeamReposEnv(team *Team) AccessibleReposEnvironment {
 }

 func (env *accessibleReposEnv) cond() builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     if env.team != nil {
         cond = cond.And(builder.Eq{"team_repo.team_id": env.team.ID})
     } else {
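
The hunks above repeatedly replace package-level-style declarations such as "var x = value" inside function bodies with the short form "x := value". A minimal standalone sketch of that convention, with hypothetical names not taken from the patch:

    package main

    import "fmt"

    func buildIndex(names []string) map[string]int {
        // was: var index = make(map[string]int, len(names))
        index := make(map[string]int, len(names))
        for i, name := range names {
            index[name] = i
        }
        return index
    }

    func main() {
        fmt.Println(buildIndex([]string{"alpha", "beta"}))
    }

The behaviour is identical; the short declaration is simply the form gofmt-style tooling prefers when the type is inferred from the right-hand side.
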
@@ -61,7 +61,7 @@ func SearchTeam(opts *SearchTeamOptions) ([]*Team, int64, error) {
         opts.PageSize = 10
     }

-    var cond = builder.NewCond()
+    cond := builder.NewCond()

     if len(opts.Keyword) > 0 {
         lowerKeyword := strings.ToLower(opts.Keyword)
@@ -80,7 +80,6 @@ func SearchTeam(opts *SearchTeamOptions) ([]*Team, int64, error) {
     count, err := sess.
         Where(cond).
         Count(new(Team))
-
     if err != nil {
         return nil, 0, err
     }
@@ -109,7 +108,6 @@ func (t *Team) ColorFormat(s fmt.State) {
         t.Name,
         log.NewColoredIDValue(t.OrgID),
         t.Authorize)
-
 }

 // GetUnits return a list of available units for a team
@@ -608,7 +606,7 @@ func GetTeamNamesByID(teamIDs []int64) ([]string, error) {
 }

 // UpdateTeam updates information of team.
-func UpdateTeam(t *Team, authChanged bool, includeAllChanged bool) (err error) {
+func UpdateTeam(t *Team, authChanged, includeAllChanged bool) (err error) {
     if len(t.Name) == 0 {
         return errors.New("empty team name")
     }
@@ -963,7 +961,7 @@ func isUserInTeams(e Engine, userID int64, teamIDs []int64) (bool, error) {
 }

 // UsersInTeamsCount counts the number of users which are in userIDs and teamIDs
-func UsersInTeamsCount(userIDs []int64, teamIDs []int64) (int64, error) {
+func UsersInTeamsCount(userIDs, teamIDs []int64) (int64, error) {
     var ids []int64
     if err := x.In("uid", userIDs).In("team_id", teamIDs).
         Table("team_user").
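
Several signatures above (UpdateTeam, UsersInTeamsCount, HasOrgVisible) are rewritten so that adjacent parameters of the same type share one type name. A small self-contained sketch of the grouped form, using an invented function rather than anything from the patch:

    package main

    import "fmt"

    // Grouped form: was (userIDs []int64, teamIDs []int64).
    func countInBoth(userIDs, teamIDs []int64) int {
        seen := make(map[int64]bool, len(userIDs))
        for _, id := range userIDs {
            seen[id] = true
        }
        count := 0
        for _, id := range teamIDs {
            if seen[id] {
                count++
            }
        }
        return count
    }

    func main() {
        fmt.Println(countInBoth([]int64{1, 2, 3}, []int64{2, 3, 4}))
    }
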
@@ -364,7 +364,7 @@ func TestHasTeamRepo(t *testing.T) {
 func TestUsersInTeamsCount(t *testing.T) {
     assert.NoError(t, PrepareTestDatabase())

-    test := func(teamIDs []int64, userIDs []int64, expected int64) {
+    test := func(teamIDs, userIDs []int64, expected int64) {
         count, err := UsersInTeamsCount(teamIDs, userIDs)
         assert.NoError(t, err)
         assert.Equal(t, expected, count)
@@ -374,12 +374,14 @@ func TestGetOrgUsersByUserID(t *testing.T) {
             ID: orgUsers[0].ID,
             OrgID: 6,
             UID: 5,
-            IsPublic: true}, *orgUsers[0])
+            IsPublic: true,
+        }, *orgUsers[0])
         assert.Equal(t, OrgUser{
             ID: orgUsers[1].ID,
             OrgID: 7,
             UID: 5,
-            IsPublic: false}, *orgUsers[1])
+            IsPublic: false,
+        }, *orgUsers[1])
     }

     publicOrgUsers, err := GetOrgUsersByUserID(5, &SearchOrganizationsOptions{All: false})
@@ -406,12 +408,14 @@ func TestGetOrgUsersByOrgID(t *testing.T) {
             ID: orgUsers[0].ID,
             OrgID: 3,
             UID: 2,
-            IsPublic: true}, *orgUsers[0])
+            IsPublic: true,
+        }, *orgUsers[0])
         assert.Equal(t, OrgUser{
             ID: orgUsers[1].ID,
             OrgID: 3,
             UID: 4,
-            IsPublic: false}, *orgUsers[1])
+            IsPublic: false,
+        }, *orgUsers[1])
     }

     orgUsers, err = GetOrgUsersByOrgID(&FindOrgMembersOpts{
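
The test hunks above change multi-line composite literals so that the last field gets a trailing comma and the closing brace sits on its own line. A minimal sketch of the resulting layout; the OrgUser fields mirror the ones used in the assertions, but the snippet itself is illustrative only, not code from the repository:

    package main

    import "fmt"

    type OrgUser struct {
        ID       int64
        OrgID    int64
        UID      int64
        IsPublic bool
    }

    func main() {
        // Each field on its own line, trailing comma, closing brace alone.
        ou := OrgUser{
            ID:       1,
            OrgID:    6,
            UID:      5,
            IsPublic: true,
        }
        fmt.Printf("%+v\n", ou)
    }
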
@@ -89,7 +89,6 @@ func GetProjects(opts ProjectSearchOptions) ([]*Project, int64, error) {
 }

 func getProjects(e Engine, opts ProjectSearchOptions) ([]*Project, int64, error) {
-
     projects := make([]*Project, 0, setting.UI.IssuePagingNum)

     var cond builder.Cond = builder.Eq{"repo_id": opts.RepoID}
@@ -58,7 +58,6 @@ func IsProjectBoardTypeValid(p ProjectBoardType) bool {
 }

 func createBoardsForProjectsType(sess *xorm.Session, project *Project) error {
-
     var items []string

     switch project.BoardType {
@@ -79,7 +78,7 @@ func createBoardsForProjectsType(sess *xorm.Session, project *Project) error {
         return nil
     }

-    var boards = make([]ProjectBoard, 0, len(items))
+    boards := make([]ProjectBoard, 0, len(items))

     for _, v := range items {
         boards = append(boards, ProjectBoard{
@@ -186,7 +185,7 @@ func GetProjectBoards(projectID int64) (ProjectBoardList, error) {
 }

 func getProjectBoards(e Engine, projectID int64) ([]*ProjectBoard, error) {
-    var boards = make([]*ProjectBoard, 0, 5)
+    boards := make([]*ProjectBoard, 0, 5)

     if err := e.Where("project_id=? AND `default`=?", projectID, false).OrderBy("Sorting").Find(&boards); err != nil {
         return nil, err
@@ -294,7 +293,6 @@ func UpdateProjectBoardSorting(bs ProjectBoardList) error {
         _, err := x.ID(bs[i].ID).Cols(
             "sorting",
         ).Update(bs[i])
-
         if err != nil {
             return err
         }
@@ -124,7 +124,6 @@ func (p *Project) NumOpenIssues() int {

 // ChangeProjectAssign changes the project associated with an issue
 func ChangeProjectAssign(issue *Issue, doer *User, newProjectID int64) error {
-
     sess := x.NewSession()
     defer sess.Close()
     if err := sess.Begin(); err != nil {
@@ -139,7 +138,6 @@ func ChangeProjectAssign(issue *Issue, doer *User, newProjectID int64) error {
 }

 func addUpdateIssueProject(e *xorm.Session, issue *Issue, doer *User, newProjectID int64) error {
-
     oldProjectID := issue.projectID(e)

     if _, err := e.Where("project_issue.issue_id=?", issue.ID).Delete(&ProjectIssue{}); err != nil {
@@ -179,7 +177,6 @@ func addUpdateIssueProject(e *xorm.Session, issue *Issue, doer *User, newProject

 // MoveIssueAcrossProjectBoards move a card from one board to another
 func MoveIssueAcrossProjectBoards(issue *Issue, board *ProjectBoard) error {
-
     sess := x.NewSession()
     defer sess.Close()
     if err := sess.Begin(); err != nil {
@@ -15,7 +15,7 @@ import (
 func TestIsProjectTypeValid(t *testing.T) {
     const UnknownType ProjectType = 15

-    var cases = []struct {
+    cases := []struct {
         typ ProjectType
         valid bool
     }{
@@ -241,7 +241,6 @@ func (pr *PullRequest) getApprovalCounts(e Engine) ([]*ReviewCount, error) {

 // GetApprovers returns the approvers of the pull request
 func (pr *PullRequest) GetApprovers() string {
-
     stringBuilder := strings.Builder{}
     if err := pr.getReviewedByLines(&stringBuilder); err != nil {
         log.Error("Unable to getReviewedByLines: Error: %v", err)
@@ -504,7 +503,7 @@ func GetLatestPullRequestByHeadInfo(repoID int64, branch string) (*PullRequest,
 }

 // GetPullRequestByIndex returns a pull request by the given index
-func GetPullRequestByIndex(repoID int64, index int64) (*PullRequest, error) {
+func GetPullRequestByIndex(repoID, index int64) (*PullRequest, error) {
     pr := &PullRequest{
         BaseRepoID: repoID,
         Index: index,
@@ -173,7 +173,7 @@ type FindReleasesOptions struct {
 }

 func (opts *FindReleasesOptions) toConds(repoID int64) builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     cond = cond.And(builder.Eq{"repo_id": repoID})

     if !opts.IncludeDrafts {
@@ -246,10 +246,12 @@ type releaseMetaSearch struct {
 func (s releaseMetaSearch) Len() int {
     return len(s.ID)
 }
+
 func (s releaseMetaSearch) Swap(i, j int) {
     s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
     s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
 }
+
 func (s releaseMetaSearch) Less(i, j int) bool {
     return s.ID[i] < s.ID[j]
 }
@@ -269,7 +271,7 @@ func getReleaseAttachments(e Engine, rels ...*Release) (err error) {
     // then merge join them

     // Sort
-    var sortedRels = releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
+    sortedRels := releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
     var attachments []*Attachment
     for index, element := range rels {
         element.Attachments = []*Attachment{}
@@ -288,7 +290,7 @@ func getReleaseAttachments(e Engine, rels ...*Release) (err error) {
     }

     // merge join
-    var currentIndex = 0
+    currentIndex := 0
     for _, attachment := range attachments {
         for sortedRels.ID[currentIndex] < attachment.ReleaseID {
             currentIndex++
@@ -1070,7 +1070,7 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository, overwriteO
     }

     // insert units for repo
-    var units = make([]RepoUnit, 0, len(DefaultRepoUnits))
+    units := make([]RepoUnit, 0, len(DefaultRepoUnits))
     for _, tp := range DefaultRepoUnits {
         if tp == UnitTypeIssues {
             units = append(units, RepoUnit{
@@ -1636,7 +1636,7 @@ func GetRepositoryByIDCtx(ctx DBContext, id int64) (*Repository, error) {

 // GetRepositoriesMapByIDs returns the repositories by given id slice.
 func GetRepositoriesMapByIDs(ids []int64) (map[int64]*Repository, error) {
-    var repos = make(map[int64]*Repository, len(ids))
+    repos := make(map[int64]*Repository, len(ids))
     return repos, x.In("id", ids).Find(&repos)
 }

@@ -1646,7 +1646,7 @@ func GetUserRepositories(opts *SearchRepoOptions) ([]*Repository, int64, error)
         opts.OrderBy = "updated_unix DESC"
     }

-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     cond = cond.And(builder.Eq{"owner_id": opts.Actor.ID})
     if !opts.Private {
         cond = cond.And(builder.Eq{"is_private": false})
@@ -2096,9 +2096,9 @@ func DoctorUserStarNum() (err error) {
 // IterateRepository iterate repositories
 func IterateRepository(f func(repo *Repository) error) error {
     var start int
-    var batchSize = setting.Database.IterateBufferSize
+    batchSize := setting.Database.IterateBufferSize
     for {
-        var repos = make([]*Repository, 0, batchSize)
+        repos := make([]*Repository, 0, batchSize)
         if err := x.Limit(batchSize, start).Find(&repos); err != nil {
             return err
         }
@@ -193,7 +193,7 @@ func CopyLanguageStat(originalRepo, destRepo *Repository) error {
         RepoLang[i].RepoID = destRepo.ID
         RepoLang[i].CreatedUnix = timeutil.TimeStampNow()
     }
-    //update destRepo's indexer status
+    // update destRepo's indexer status
     tmpCommitID := RepoLang[0].CommitID
     if err := destRepo.updateIndexerStatus(sess, RepoIndexerTypeStats, tmpCommitID); err != nil {
         return err
@@ -180,7 +180,7 @@ type SearchRepoOptions struct {
     LowerNames []string
 }

-//SearchOrderBy is used to sort the result
+// SearchOrderBy is used to sort the result
 type SearchOrderBy string

 func (s SearchOrderBy) String() string {
@@ -207,7 +207,7 @@ const (

 // SearchRepositoryCondition creates a query condition according search repository options
 func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()

     if opts.Private {
         if opts.Actor != nil && !opts.Actor.IsAdmin && opts.Actor.ID != opts.OwnerID {
@@ -242,7 +242,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {

     // Restrict repositories to those the OwnerID owns or contributes to as per opts.Collaborate
     if opts.OwnerID > 0 {
-        var accessCond = builder.NewCond()
+        accessCond := builder.NewCond()
         if opts.Collaborate != util.OptionalBoolTrue {
             accessCond = builder.Eq{"owner_id": opts.OwnerID}
         }
@@ -301,7 +301,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {

     if opts.Keyword != "" {
         // separate keyword
-        var subQueryCond = builder.NewCond()
+        subQueryCond := builder.NewCond()
         for _, v := range strings.Split(opts.Keyword, ",") {
             if opts.TopicOnly {
                 subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
@@ -314,9 +314,9 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
             Where(subQueryCond).
             GroupBy("repo_topic.repo_id")

-        var keywordCond = builder.In("id", subQuery)
+        keywordCond := builder.In("id", subQuery)
         if !opts.TopicOnly {
-            var likes = builder.NewCond()
+            likes := builder.NewCond()
             for _, v := range strings.Split(opts.Keyword, ",") {
                 likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
                 if opts.IncludeDescription {
@@ -381,7 +381,6 @@ func SearchRepositoryByCondition(opts *SearchRepoOptions, cond builder.Cond, loa
     count, err := sess.
         Where(cond).
         Count(new(Repository))
-
     if err != nil {
         return nil, 0, fmt.Errorf("Count: %v", err)
     }
@@ -406,7 +405,7 @@ func SearchRepositoryByCondition(opts *SearchRepoOptions, cond builder.Cond, loa

 // accessibleRepositoryCondition takes a user a returns a condition for checking if a repository is accessible
 func accessibleRepositoryCondition(user *User) builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()

     if user == nil || !user.IsRestricted || user.ID <= 0 {
         orgVisibilityLimit := []structs.VisibleType{structs.VisibleTypePrivate}
@@ -119,90 +119,146 @@ func TestSearchRepository(t *testing.T) {
         opts *SearchRepoOptions
         count int
     }{
-        {name: "PublicRepositoriesByName",
+        {
+            name: "PublicRepositoriesByName",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{PageSize: 10}, Collaborate: util.OptionalBoolFalse},
-            count: 7},
-        {name: "PublicAndPrivateRepositoriesByName",
+            count: 7,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesByName",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 10}, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
+            count: 14,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
+            count: 14,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 2, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
+            count: 14,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
+            count: 14,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "PublicRepositoriesOfUser",
+            count: 14,
+        },
+        {
+            name: "PublicRepositoriesOfUser",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Collaborate: util.OptionalBoolFalse},
-            count: 2},
-        {name: "PublicRepositoriesOfUser2",
+            count: 2,
+        },
+        {
+            name: "PublicRepositoriesOfUser2",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Collaborate: util.OptionalBoolFalse},
-            count: 0},
-        {name: "PublicRepositoriesOfUser3",
+            count: 0,
+        },
+        {
+            name: "PublicRepositoriesOfUser3",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Collaborate: util.OptionalBoolFalse},
-            count: 2},
-        {name: "PublicAndPrivateRepositoriesOfUser",
+            count: 2,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUser",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 4},
-        {name: "PublicAndPrivateRepositoriesOfUser2",
+            count: 4,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUser2",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 0},
-        {name: "PublicAndPrivateRepositoriesOfUser3",
+            count: 0,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUser3",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 4},
-        {name: "PublicRepositoriesOfUserIncludingCollaborative",
+            count: 4,
+        },
+        {
+            name: "PublicRepositoriesOfUserIncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15},
-            count: 5},
-        {name: "PublicRepositoriesOfUser2IncludingCollaborative",
+            count: 5,
+        },
+        {
+            name: "PublicRepositoriesOfUser2IncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18},
-            count: 1},
-        {name: "PublicRepositoriesOfUser3IncludingCollaborative",
+            count: 1,
+        },
+        {
+            name: "PublicRepositoriesOfUser3IncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20},
-            count: 3},
-        {name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+            count: 3,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true},
-            count: 9},
-        {name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
+            count: 9,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true},
-            count: 4},
-        {name: "PublicAndPrivateRepositoriesOfUser3IncludingCollaborative",
+            count: 4,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfUser3IncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true},
-            count: 7},
-        {name: "PublicRepositoriesOfOrganization",
+            count: 7,
+        },
+        {
+            name: "PublicRepositoriesOfOrganization",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Collaborate: util.OptionalBoolFalse},
-            count: 1},
-        {name: "PublicAndPrivateRepositoriesOfOrganization",
+            count: 1,
+        },
+        {
+            name: "PublicAndPrivateRepositoriesOfOrganization",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Private: true, Collaborate: util.OptionalBoolFalse},
-            count: 2},
-        {name: "AllPublic/PublicRepositoriesByName",
+            count: 2,
+        },
+        {
+            name: "AllPublic/PublicRepositoriesByName",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{PageSize: 10}, AllPublic: true, Collaborate: util.OptionalBoolFalse},
-            count: 7},
-        {name: "AllPublic/PublicAndPrivateRepositoriesByName",
+            count: 7,
+        },
+        {
+            name: "AllPublic/PublicAndPrivateRepositoriesByName",
             opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 10}, Private: true, AllPublic: true, Collaborate: util.OptionalBoolFalse},
-            count: 14},
-        {name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
+            count: 14,
+        },
+        {
+            name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, AllPublic: true, Template: util.OptionalBoolFalse},
-            count: 28},
-        {name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+            count: 28,
+        },
+        {
+            name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true, AllLimited: true, Template: util.OptionalBoolFalse},
-            count: 33},
-        {name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
+            count: 33,
+        },
+        {
+            name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
             opts: &SearchRepoOptions{Keyword: "test", ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true},
-            count: 15},
-        {name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
+            count: 15,
+        },
+        {
+            name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
             opts: &SearchRepoOptions{Keyword: "test", ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, AllPublic: true},
-            count: 13},
-        {name: "AllPublic/PublicRepositoriesOfOrganization",
+            count: 13,
+        },
+        {
+            name: "AllPublic/PublicRepositoriesOfOrganization",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, AllPublic: true, Collaborate: util.OptionalBoolFalse, Template: util.OptionalBoolFalse},
-            count: 28},
-        {name: "AllTemplates",
+            count: 28,
+        },
+        {
+            name: "AllTemplates",
             opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, Template: util.OptionalBoolTrue},
-            count: 2},
+            count: 2,
+        },
     }

     for _, testCase := range testCases {
@@ -216,7 +272,7 @@ func TestSearchRepository(t *testing.T) {
         if page <= 0 {
             page = 1
         }
-        var expectedLen = testCase.opts.PageSize
+        expectedLen := testCase.opts.PageSize
         if testCase.opts.PageSize*page > testCase.count+testCase.opts.PageSize {
             expectedLen = 0
         } else if testCase.opts.PageSize*page > testCase.count {
@@ -274,15 +330,21 @@ func TestSearchRepositoryByTopicName(t *testing.T) {
         opts *SearchRepoOptions
         count int
     }{
-        {name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
+        {
+            name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
             opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql"},
-            count: 2},
-        {name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
+            count: 2,
+        },
+        {
+            name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
             opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql", TopicOnly: true},
-            count: 1},
-        {name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
+            count: 1,
+        },
+        {
+            name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
             opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql,golang", TopicOnly: true},
-            count: 2},
+            count: 2,
+        },
     }

     for _, testCase := range testCases {
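
The large test hunk above reflows each table-driven test case so it opens with "{" on its own line, lists one field per line, and closes with "},". A stripped-down sketch of the same layout under an invented package and test (none of the names below come from the repository):

    package tabletest

    import "testing"

    func TestPageCount(t *testing.T) {
        testCases := []struct {
            name     string
            total    int
            pageSize int
            want     int
        }{
            {
                name:     "ExactFit",
                total:    10,
                pageSize: 5,
                want:     2,
            },
            {
                name:     "Remainder",
                total:    11,
                pageSize: 5,
                want:     3,
            },
        }
        for _, tc := range testCases {
            t.Run(tc.name, func(t *testing.T) {
                // Ceiling division: pages needed to hold `total` items.
                got := (tc.total + tc.pageSize - 1) / tc.pageSize
                if got != tc.want {
                    t.Errorf("got %d, want %d", got, tc.want)
                }
            })
        }
    }

The expanded form costs a couple of lines per case but keeps every case addressable by name and makes later additions a one-block change.
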
@@ -141,7 +141,6 @@ func TestRepoAPIURL(t *testing.T) {
 }

 func TestUploadAvatar(t *testing.T) {
-
     // Generate image
     myImage := image.NewRGBA(image.Rect(0, 0, 1, 1))
     var buff bytes.Buffer
@@ -156,7 +155,6 @@ func TestUploadAvatar(t *testing.T) {
 }

 func TestUploadBigAvatar(t *testing.T) {
-
     // Generate BIG image
     myImage := image.NewRGBA(image.Rect(0, 0, 5000, 1))
     var buff bytes.Buffer
@@ -170,7 +168,6 @@ func TestUploadBigAvatar(t *testing.T) {
 }

 func TestDeleteAvatar(t *testing.T) {
-
     // Generate image
     myImage := image.NewRGBA(image.Rect(0, 0, 1, 1))
     var buff bytes.Buffer
@@ -40,7 +40,6 @@ func (r *RepoTransfer) LoadAttributes() error {
     }

     if r.Recipient.IsOrganization() && len(r.TeamIDs) != len(r.Teams) {
-
         for _, v := range r.TeamIDs {
             team, err := GetTeamByID(v)
             if err != nil {
@@ -92,7 +91,7 @@ func (r *RepoTransfer) CanUserAcceptTransfer(u *User) bool {
 // GetPendingRepositoryTransfer fetches the most recent and ongoing transfer
 // process for the repository
 func GetPendingRepositoryTransfer(repo *Repository) (*RepoTransfer, error) {
-    var transfer = new(RepoTransfer)
+    transfer := new(RepoTransfer)

     has, err := x.Where("repo_id = ? ", repo.ID).Get(transfer)
     if err != nil {
@@ -11,7 +11,6 @@ import (
 )

 func TestRepositoryTransfer(t *testing.T) {
-
     assert.NoError(t, PrepareTestDatabase())

     doer := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User)
@@ -24,8 +24,7 @@ type RepoUnit struct {
 }

 // UnitConfig describes common unit config
-type UnitConfig struct {
-}
+type UnitConfig struct{}

 // FromDB fills up a UnitConfig from serialized format.
 func (cfg *UnitConfig) FromDB(bs []byte) error {
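
The UnitConfig change above (and the ContentEmptyErr change further down) collapses a struct with no fields onto one line. A tiny illustrative sketch with a hypothetical type, not code from the patch:

    package main

    import "fmt"

    // Empty marker type written as struct{} on a single line,
    // instead of "struct {" followed by "}" on the next line.
    type PingConfig struct{}

    func (PingConfig) Kind() string { return "ping" }

    func main() {
        fmt.Println(PingConfig{}.Kind())
    }
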
@@ -312,6 +312,6 @@ func watchIfAuto(e Engine, userID, repoID int64, isWrite bool) error {
 }

 // WatchIfAuto subscribes to repo if AutoWatchOnChanges is set
-func WatchIfAuto(userID int64, repoID int64, isWrite bool) error {
+func WatchIfAuto(userID, repoID int64, isWrite bool) error {
     return watchIfAuto(x, userID, repoID, isWrite)
 }
@@ -175,7 +175,7 @@ type FindReviewOptions struct {
 }

 func (opts *FindReviewOptions) toCond() builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     if opts.IssueID > 0 {
         cond = cond.And(builder.Eq{"issue_id": opts.IssueID})
     }
@@ -334,8 +334,7 @@ func GetCurrentReview(reviewer *User, issue *Issue) (*Review, error) {
 }

 // ContentEmptyErr represents an content empty error
-type ContentEmptyErr struct {
-}
+type ContentEmptyErr struct{}

 func (ContentEmptyErr) Error() string {
     return "Review content is empty"
@@ -355,7 +354,7 @@ func SubmitReview(doer *User, issue *Issue, reviewType ReviewType, content, comm
         return nil, nil, err
     }

-    var official = false
+    official := false

     review, err := getCurrentReview(sess, doer, issue)
     if err != nil {
@@ -668,7 +667,7 @@ func AddReviewRequest(issue *Issue, reviewer, doer *User) (*Comment, error) {
     return comment, sess.Commit()
 }

-//RemoveReviewRequest remove a review request from one reviewer
+// RemoveReviewRequest remove a review request from one reviewer
 func RemoveReviewRequest(issue *Issue, reviewer, doer *User) (*Comment, error) {
     sess := x.NewSession()
     defer sess.Close()
@@ -780,7 +779,7 @@ func AddTeamReviewRequest(issue *Issue, reviewer *Team, doer *User) (*Comment, e
     return comment, sess.Commit()
 }

-//RemoveTeamReviewRequest remove a review request from one team
+// RemoveTeamReviewRequest remove a review request from one team
 func RemoveTeamReviewRequest(issue *Issue, reviewer *Team, doer *User) (*Comment, error) {
     sess := x.NewSession()
     defer sess.Close()
@@ -34,7 +34,6 @@ func TestReview_LoadAttributes(t *testing.T) {

     invalidReview2 := AssertExistsAndLoadBean(t, &Review{ID: 3}).(*Review)
     assert.Error(t, invalidReview2.LoadAttributes())
-
 }

 func TestReview_LoadCodeComments(t *testing.T) {
@@ -377,7 +377,7 @@ func appendAuthorizedKeysToFile(keys ...*PublicKey) error {
     // This of course doesn't guarantee that this is the right directory for authorized_keys
     // but at least if it's supposed to be this directory and it doesn't exist and we're the
     // right user it will at least be created properly.
-    err := os.MkdirAll(setting.SSH.RootPath, 0700)
+    err := os.MkdirAll(setting.SSH.RootPath, 0o700)
     if err != nil {
         log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
         return err
@@ -385,7 +385,7 @@ func appendAuthorizedKeysToFile(keys ...*PublicKey) error {
     }

     fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
-    f, err := os.OpenFile(fPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+    f, err := os.OpenFile(fPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
     if err != nil {
         return err
     }
@@ -399,9 +399,9 @@ func appendAuthorizedKeysToFile(keys ...*PublicKey) error {
     }

     // .ssh directory should have mode 700, and authorized_keys file should have mode 600.
-    if fi.Mode().Perm() > 0600 {
+    if fi.Mode().Perm() > 0o600 {
         log.Error("authorized_keys file has unusual permission flags: %s - setting to -rw-------", fi.Mode().Perm().String())
-        if err = f.Chmod(0600); err != nil {
+        if err = f.Chmod(0o600); err != nil {
             return err
         }
     }
@@ -465,7 +465,7 @@ func calcFingerprintNative(publicKeyContent string) (string, error) {
 }

 func calcFingerprint(publicKeyContent string) (string, error) {
-    //Call the method based on configuration
+    // Call the method based on configuration
     var (
         fnName, fp string
         err error
@@ -628,7 +628,7 @@ func ListPublicKeys(uid int64, listOptions ListOptions) ([]*PublicKey, error) {
 }

 // ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source.
-func ListPublicLdapSSHKeys(uid int64, loginSourceID int64) ([]*PublicKey, error) {
+func ListPublicLdapSSHKeys(uid, loginSourceID int64) ([]*PublicKey, error) {
     keys := make([]*PublicKey, 0, 5)
     return keys, x.
         Where("owner_id = ? AND login_source_id = ?", uid, loginSourceID).
@@ -782,7 +782,7 @@ func RewriteAllPublicKeys() error {
 }

 func rewriteAllPublicKeys(e Engine) error {
-    //Don't rewrite key if internal server
+    // Don't rewrite key if internal server
     if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedKeysFile {
         return nil
     }
@@ -795,7 +795,7 @@ func rewriteAllPublicKeys(e Engine) error {
     // This of course doesn't guarantee that this is the right directory for authorized_keys
     // but at least if it's supposed to be this directory and it doesn't exist and we're the
     // right user it will at least be created properly.
-    err := os.MkdirAll(setting.SSH.RootPath, 0700)
+    err := os.MkdirAll(setting.SSH.RootPath, 0o700)
     if err != nil {
         log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
         return err
@@ -804,7 +804,7 @@ func rewriteAllPublicKeys(e Engine) error {

     fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
     tmpPath := fPath + ".tmp"
-    t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+    t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
     if err != nil {
         return err
     }
@@ -1147,7 +1147,7 @@ func listDeployKeys(e Engine, repoID int64, listOptions ListOptions) ([]*DeployK
 }

 // SearchDeployKeys returns a list of deploy keys matching the provided arguments.
-func SearchDeployKeys(repoID int64, keyID int64, fingerprint string) ([]*DeployKey, error) {
+func SearchDeployKeys(repoID, keyID int64, fingerprint string) ([]*DeployKey, error) {
     keys := make([]*DeployKey, 0, 5)
     cond := builder.NewCond()
     if repoID != 0 {
@@ -1279,7 +1279,7 @@ func rewriteAllPrincipalKeys(e Engine) error {
     // This of course doesn't guarantee that this is the right directory for authorized_keys
     // but at least if it's supposed to be this directory and it doesn't exist and we're the
     // right user it will at least be created properly.
-    err := os.MkdirAll(setting.SSH.RootPath, 0700)
+    err := os.MkdirAll(setting.SSH.RootPath, 0o700)
     if err != nil {
         log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
         return err
@@ -1288,7 +1288,7 @@ func rewriteAllPrincipalKeys(e Engine) error {

     fPath := filepath.Join(setting.SSH.RootPath, authorizedPrincipalsFile)
     tmpPath := fPath + ".tmp"
-    t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+    t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
     if err != nil {
         return err
     }
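
The hunks above switch file-permission constants from the legacy octal spelling (0700, 0600) to the explicit 0o prefix; the values are numerically identical. A small standalone sketch of the same idiom, using a temporary directory rather than any path from the repository:

    package main

    import (
        "log"
        "os"
        "path/filepath"
    )

    func main() {
        // 0o700 / 0o600 are the modern octal literals; same bits as 0700 / 0600.
        dir := filepath.Join(os.TempDir(), "authorized_keys_demo") // hypothetical path
        if err := os.MkdirAll(dir, 0o700); err != nil {
            log.Fatal(err)
        }
        f, err := os.OpenFile(filepath.Join(dir, "authorized_keys"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
    }
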
@@ -135,7 +135,7 @@ func (err ErrTaskDoesNotExist) Error() string {

 // GetMigratingTask returns the migrating task by repo's id
 func GetMigratingTask(repoID int64) (*Task, error) {
-    var task = Task{
+    task := Task{
         RepoID: repoID,
         Type: structs.TaskTypeMigrateRepo,
     }
@@ -150,7 +150,7 @@ func GetMigratingTask(repoID int64) (*Task, error) {

 // GetMigratingTaskByID returns the migrating task by repo's id
 func GetMigratingTaskByID(id, doerID int64) (*Task, *migration.MigrateOptions, error) {
-    var task = Task{
+    task := Task{
         ID: id,
         DoerID: doerID,
         Type: structs.TaskTypeMigrateRepo,
@@ -177,7 +177,7 @@ type FindTaskOptions struct {

 // ToConds generates conditions for database operation.
 func (opts FindTaskOptions) ToConds() builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     if opts.Status >= 0 {
         cond = cond.And(builder.Eq{"status": opts.Status})
     }
@@ -186,7 +186,7 @@ func (opts FindTaskOptions) ToConds() builder.Cond {

 // FindTasks find all tasks
 func FindTasks(opts FindTaskOptions) ([]*Task, error) {
-    var tasks = make([]*Task, 0, 10)
+    tasks := make([]*Task, 0, 10)
     err := x.Where(opts.ToConds()).Find(&tasks)
     return tasks, err
 }
@@ -28,7 +28,6 @@ func TestNewAccessToken(t *testing.T) {
 }

 func TestAccessTokenByNameExists(t *testing.T) {
-
     name := "Token Gitea"

     assert.NoError(t, PrepareTestDatabase())
@@ -60,7 +60,7 @@ func ValidateTopic(topic string) bool {
 }

 // SanitizeAndValidateTopics sanitizes and checks an array or topics
-func SanitizeAndValidateTopics(topics []string) (validTopics []string, invalidTopics []string) {
+func SanitizeAndValidateTopics(topics []string) (validTopics, invalidTopics []string) {
     validTopics = make([]string, 0)
     mValidTopics := make(map[string]struct{})
     invalidTopics = make([]string, 0)
@@ -171,7 +171,7 @@ type FindTopicOptions struct {
 }

 func (opts *FindTopicOptions) toConds() builder.Cond {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     if opts.RepoID > 0 {
         cond = cond.And(builder.Eq{"repo_topic.repo_id": opts.RepoID})
     }
@@ -199,8 +199,9 @@ func FindTopics(opts *FindTopicOptions) (topics []*Topic, err error) {
 func GetRepoTopicByName(repoID int64, topicName string) (*Topic, error) {
     return getRepoTopicByName(x, repoID, topicName)
 }
+
 func getRepoTopicByName(e Engine, repoID int64, topicName string) (*Topic, error) {
-    var cond = builder.NewCond()
+    cond := builder.NewCond()
     var topic Topic
     cond = cond.And(builder.Eq{"repo_topic.repo_id": repoID}).And(builder.Eq{"topic.name": topicName})
     sess := e.Table("topic").Where(cond)
@@ -207,7 +207,7 @@ func AssertSuccessfulInsert(t testing.TB, beans ...interface{}) {
 }

 // AssertCount assert the count of a bean
-func AssertCount(t testing.TB, bean interface{}, expected interface{}) {
+func AssertCount(t testing.TB, bean, expected interface{}) {
     assert.EqualValues(t, expected, GetCount(t, bean))
 }

@@ -1307,7 +1307,6 @@ func DeleteInactiveUsers(ctx context.Context, olderThan time.Duration) (err erro
 			Find(&users); err != nil {
 			return fmt.Errorf("get all inactive users: %v", err)
 		}
-
 	}
 	// FIXME: should only update authorized_keys file once after all deletions.
 	for _, u := range users {

@@ -1572,7 +1571,6 @@ type SearchUserOptions struct {
 
 func (opts *SearchUserOptions) toConds() builder.Cond {
 	var cond builder.Cond = builder.Eq{"type": opts.Type}
-
 	if len(opts.Keyword) > 0 {
 		lowerKeyword := strings.ToLower(opts.Keyword)
 		keywordCond := builder.Or(

@@ -1601,7 +1599,8 @@ func (opts *SearchUserOptions) toConds() builder.Cond {
 		} else {
 			exprCond = builder.Expr("org_user.org_id = \"user\".id")
 		}
-		var accessCond = builder.NewCond()
+
+		var accessCond builder.Cond
 		if !opts.Actor.IsRestricted {
 			accessCond = builder.Or(
 				builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),

@@ -1847,7 +1846,7 @@ func SyncExternalUsers(ctx context.Context, updateExisting bool) error {
 		log.Trace("Doing: SyncExternalUsers[%s]", s.Name)
 
 		var existingUsers []int64
-		var isAttributeSSHPublicKeySet = len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
+		isAttributeSSHPublicKeySet := len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
 		var sshKeysNeedUpdate bool
 
 		// Find all users with this login type

@@ -2021,9 +2020,9 @@ func SyncExternalUsers(ctx context.Context, updateExisting bool) error {
 // IterateUser iterate users
 func IterateUser(f func(user *User) error) error {
 	var start int
-	var batchSize = setting.Database.IterateBufferSize
+	batchSize := setting.Database.IterateBufferSize
 	for {
-		var users = make([]*User, 0, batchSize)
+		users := make([]*User, 0, batchSize)
 		if err := x.Limit(batchSize, start).Find(&users); err != nil {
 			return err
 		}

@@ -16,7 +16,7 @@ type UserHeatmapData struct {
 }
 
 // GetUserHeatmapDataByUser returns an array of UserHeatmapData
-func GetUserHeatmapDataByUser(user *User, doer *User) ([]*UserHeatmapData, error) {
+func GetUserHeatmapDataByUser(user, doer *User) ([]*UserHeatmapData, error) {
 	return getUserHeatmapData(user, nil, doer)
 }
 

@@ -33,7 +33,7 @@ func getUserHeatmapData(user *User, team *Team, doer *User) ([]*UserHeatmapData,
 	}
 
 	var groupBy string
-	var groupByName = "timestamp" // We need this extra case because mssql doesn't allow grouping by alias
+	groupByName := "timestamp" // We need this extra case because mssql doesn't allow grouping by alias
 	switch {
 	case setting.Database.UseSQLite3:
 		groupBy = "strftime('%s', strftime('%Y-%m-%d', created_unix, 'unixepoch'))"

@@ -55,7 +55,7 @@ func TestGetUserHeatmapDataByUser(t *testing.T) {
 		assert.Equal(t, len(actions), len(heatmap), "invalid action count: did the test data became too old?")
 		assert.Equal(t, tc.CountResult, len(heatmap), fmt.Sprintf("testcase %d", i))
 
-		//Test JSON rendering
+		// Test JSON rendering
 		json := jsoniter.ConfigCompatibleWithStandardLibrary
 		jsonData, err := json.Marshal(heatmap)
 		assert.NoError(t, err)

@@ -18,10 +18,8 @@ import (
 	"xorm.io/builder"
 )
 
-var (
-	// ErrEmailAddressNotExist email address not exist
-	ErrEmailAddressNotExist = errors.New("Email address does not exist")
-)
+// ErrEmailAddressNotExist email address not exist
+var ErrEmailAddressNotExist = errors.New("Email address does not exist")
 
 // EmailAddress is the list of all email addresses of a user. Can contain the
 // primary email address, but is not obligatory.

@@ -231,7 +229,7 @@ func (email *EmailAddress) updateActivation(e Engine, activate bool) error {
 func DeleteEmailAddress(email *EmailAddress) (err error) {
 	var deleted int64
 	// ask to check UID
-	var address = EmailAddress{
+	address := EmailAddress{
 		UID: email.UID,
 	}
 	if email.ID > 0 {

@@ -11,10 +11,8 @@ import (
 	"code.gitea.io/gitea/modules/log"
 )
 
-var (
-	// ErrOpenIDNotExist openid is not known
-	ErrOpenIDNotExist = errors.New("OpenID is unknown")
-)
+// ErrOpenIDNotExist openid is not known
+var ErrOpenIDNotExist = errors.New("OpenID is unknown")
 
 // UserOpenID is the list of all OpenID identities of a user.
 type UserOpenID struct {

@@ -72,7 +70,7 @@ func AddUserOpenID(openid *UserOpenID) error {
 func DeleteUserOpenID(openid *UserOpenID) (err error) {
 	var deleted int64
 	// ask to check UID
-	var address = UserOpenID{
+	address := UserOpenID{
 		UID: openid.UID,
 	}
 	if openid.ID > 0 {

@@ -36,7 +36,7 @@ func TestUserIsPublicMember(t *testing.T) {
 	}
 }
 
-func testUserIsPublicMember(t *testing.T, uid int64, orgID int64, expected bool) {
+func testUserIsPublicMember(t *testing.T, uid, orgID int64, expected bool) {
 	user, err := GetUserByID(uid)
 	assert.NoError(t, err)
 	assert.Equal(t, expected, user.IsPublicMember(orgID))

@@ -62,7 +62,7 @@ func TestIsUserOrgOwner(t *testing.T) {
 	}
 }
 
-func testIsUserOrgOwner(t *testing.T, uid int64, orgID int64, expected bool) {
+func testIsUserOrgOwner(t *testing.T, uid, orgID int64, expected bool) {
 	user, err := GetUserByID(uid)
 	assert.NoError(t, err)
 	assert.Equal(t, expected, user.IsUserOrgOwner(orgID))

@@ -338,7 +338,6 @@ func TestCreateUserInvalidEmail(t *testing.T) {
 }
 
 func TestCreateUser_Issue5882(t *testing.T) {
-
 	// Init settings
 	_ = setting.Admin
 

@@ -369,13 +368,12 @@ func TestCreateUser_Issue5882(t *testing.T) {
 }
 
 func TestGetUserIDsByNames(t *testing.T) {
-
-	//ignore non existing
+	// ignore non existing
 	IDs, err := GetUserIDsByNames([]string{"user1", "user2", "none_existing_user"}, true)
 	assert.NoError(t, err)
 	assert.Equal(t, []int64{1, 2}, IDs)
 
-	//ignore non existing
+	// ignore non existing
 	IDs, err = GetUserIDsByNames([]string{"user1", "do_not_exist"}, false)
 	assert.Error(t, err)
 	assert.Equal(t, []int64(nil), IDs)

@@ -10,14 +10,14 @@ import (
 	"code.gitea.io/gitea/modules/log"
 )
 
-//UserList is a list of user.
+// UserList is a list of user.
 // This type provide valuable methods to retrieve information for a group of users efficiently.
 type UserList []*User
 
 func (users UserList) getUserIDs() []int64 {
 	userIDs := make([]int64, len(users))
 	for _, user := range users {
-		userIDs = append(userIDs, user.ID) //Considering that user id are unique in the list
+		userIDs = append(userIDs, user.ID) // Considering that user id are unique in the list
 	}
 	return userIDs
 }

@@ -26,7 +26,7 @@ func (users UserList) getUserIDs() []int64 {
 func (users UserList) IsUserOrgOwner(orgID int64) map[int64]bool {
 	results := make(map[int64]bool, len(users))
 	for _, user := range users {
-		results[user.ID] = false //Set default to false
+		results[user.ID] = false // Set default to false
 	}
 	ownerMaps, err := users.loadOrganizationOwners(x, orgID)
 	if err == nil {

@@ -66,7 +66,7 @@ func (users UserList) loadOrganizationOwners(e Engine, orgID int64) (map[int64]*
 func (users UserList) GetTwoFaStatus() map[int64]bool {
 	results := make(map[int64]bool, len(users))
 	for _, user := range users {
-		results[user.ID] = false //Set default to false
+		results[user.ID] = false // Set default to false
 	}
 	tokenMaps, err := users.loadTwoFactorStatus(x)
 	if err == nil {

Some files were not shown because too many files have changed in this diff.
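For readers skimming the hunks above: they all repeat the same mechanical rewrites. The following is a minimal, self-contained Go sketch of those patterns; it is not taken from the Gitea/Forgejo sources, and the function and variable names (`fullName`, `ownerName`, `repoName`, `parts`) are invented purely for illustration. It shows a `var x = value` declaration becoming a short variable declaration, adjacent parameters of the same type being grouped, and `//comment` gaining a space after the slashes.

```go
package main

import "fmt"

// Adjacent parameters with the same type are grouped together:
// before: func fullName(ownerName string, repoName string) string
func fullName(ownerName, repoName string) string {
	// An assignment-style declaration becomes a short variable declaration:
	// before: var parts = make([]string, 0, 2)
	parts := make([]string, 0, 2)
	parts = append(parts, ownerName, repoName)
	// Comment spacing: "//join owner and repo" becomes "// join owner and repo".
	return parts[0] + "/" + parts[1]
}

func main() {
	fmt.Println(fullName("forgejo", "forgejo")) // prints "forgejo/forgejo"
}
```

The behavior of the code is unchanged by any of these rewrites; they only normalize the formatting, which is consistent with the commit being a format-only change.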